-rw-r--r--Documentation/CodingStyle100
-rw-r--r--Documentation/DocBook/kernel-api.tmpl13
-rw-r--r--Documentation/RCU/whatisRCU.txt1
-rw-r--r--Documentation/SubmitChecklist57
-rw-r--r--Documentation/devices.txt135
-rw-r--r--Documentation/feature-removal-schedule.txt15
-rw-r--r--Documentation/filesystems/Locking9
-rw-r--r--Documentation/filesystems/porting7
-rw-r--r--Documentation/filesystems/vfs.txt6
-rw-r--r--Documentation/ioctl-number.txt2
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/power/swsusp.txt45
-rw-r--r--Documentation/power/video.txt4
-rw-r--r--Documentation/sparse.txt36
-rw-r--r--Documentation/sysctl/vm.txt13
-rw-r--r--Documentation/vm/page_migration114
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c12
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/alpha/kernel/signal.c2
-rw-r--r--arch/arm/kernel/irq.c4
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c11
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/frv_ksyms.c18
-rw-r--r--arch/frv/kernel/irq-routing.c8
-rw-r--r--arch/frv/kernel/irq.c6
-rw-r--r--arch/frv/kernel/pm.c40
-rw-r--r--arch/frv/kernel/process.c2
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/frv/kernel/signal.c22
-rw-r--r--arch/frv/kernel/sys_frv.c2
-rw-r--r--arch/frv/kernel/sysctl.c4
-rw-r--r--arch/frv/kernel/uaccess.c6
-rw-r--r--arch/frv/mb93090-mb00/pci-irq.c10
-rw-r--r--arch/frv/mm/kmap.c6
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/i386/Kconfig7
-rw-r--r--arch/i386/kernel/acpi/boot.c8
-rw-r--r--arch/i386/kernel/acpi/processor.c2
-rw-r--r--arch/i386/kernel/acpi/sleep.c19
-rw-r--r--arch/i386/kernel/acpi/wakeup.S11
-rw-r--r--arch/i386/kernel/apic.c4
-rw-r--r--arch/i386/kernel/apm.c39
-rw-r--r--arch/i386/kernel/cpu/common.c2
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c291
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c254
-rw-r--r--arch/i386/kernel/cpu/cyrix.c9
-rw-r--r--arch/i386/kernel/cpu/proc.c2
-rw-r--r--arch/i386/kernel/i387.c2
-rw-r--r--arch/i386/kernel/i8259.c4
-rw-r--r--arch/i386/kernel/io_apic.c31
-rw-r--r--arch/i386/kernel/irq.c4
-rw-r--r--arch/i386/kernel/kprobes.c2
-rw-r--r--arch/i386/kernel/microcode.c73
-rw-r--r--arch/i386/kernel/setup.c106
-rw-r--r--arch/i386/kernel/srat.c19
-rw-r--r--arch/i386/kernel/syscall_table.S1
-rw-r--r--arch/i386/kernel/traps.c11
-rw-r--r--arch/i386/lib/usercopy.c137
-rw-r--r--arch/i386/mm/fault.c21
-rw-r--r--arch/i386/mm/init.c3
-rw-r--r--arch/i386/mm/pageattr.c8
-rw-r--r--arch/i386/pci/irq.c15
-rw-r--r--arch/i386/power/cpu.c2
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/hp/common/sba_iommu.c4
-rw-r--r--arch/ia64/kernel/acpi.c28
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/perfmon.c10
-rw-r--r--arch/ia64/kernel/uncached.c200
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/ia64/pci/pci.c2
-rw-r--r--arch/ia64/sn/kernel/setup.c4
-rw-r--r--arch/ia64/sn/kernel/sn2/cache.c15
-rw-r--r--arch/m68k/kernel/entry.S4
-rw-r--r--arch/m68k/kernel/ints.c6
-rw-r--r--arch/m68k/kernel/signal.c2
-rw-r--r--arch/m68k/kernel/traps.c8
-rw-r--r--arch/m68k/lib/Makefile4
-rw-r--r--arch/m68k/lib/uaccess.c222
-rw-r--r--arch/m68k/mac/config.c13
-rw-r--r--arch/m68k/mac/macints.c1
-rw-r--r--arch/m68k/mac/via.c17
-rw-r--r--arch/m68k/mm/motorola.c12
-rw-r--r--arch/m68k/mm/sun3mmu.c5
-rw-r--r--arch/m68knommu/kernel/signal.c2
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/sysirix.c12
-rw-r--r--arch/parisc/hpux/sys_hpux.c10
-rw-r--r--arch/parisc/kernel/signal.c2
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/kernel/irq.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c6
-rw-r--r--arch/s390/Kconfig8
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/hypfs/Makefile7
-rw-r--r--arch/s390/hypfs/hypfs.h30
-rw-r--r--arch/s390/hypfs/hypfs_diag.c696
-rw-r--r--arch/s390/hypfs/hypfs_diag.h16
-rw-r--r--arch/s390/hypfs/inode.c492
-rw-r--r--arch/sh64/kernel/signal.c2
-rw-r--r--arch/sparc64/solaris/fs.c4
-rw-r--r--arch/um/Kconfig.debug1
-rw-r--r--arch/v850/kernel/signal.c2
-rw-r--r--arch/x86_64/Kconfig1
-rw-r--r--arch/x86_64/ia32/ia32entry.S1
-rw-r--r--arch/x86_64/kernel/acpi/Makefile1
-rw-r--r--arch/x86_64/kernel/acpi/processor.c72
-rw-r--r--arch/x86_64/kernel/acpi/sleep.c7
-rw-r--r--arch/x86_64/kernel/apic.c2
-rw-r--r--arch/x86_64/kernel/i387.c2
-rw-r--r--arch/x86_64/kernel/setup.c95
-rw-r--r--arch/x86_64/mm/srat.c33
-rw-r--r--arch/xtensa/Kconfig4
-rw-r--r--arch/xtensa/kernel/entry.S2
-rw-r--r--arch/xtensa/kernel/signal.c12
-rw-r--r--block/ll_rw_blk.c5
-rw-r--r--drivers/acpi/Kconfig5
-rw-r--r--drivers/acpi/acpi_memhotplug.c12
-rw-r--r--drivers/acpi/asus_acpi.c32
-rw-r--r--drivers/acpi/bus.c22
-rw-r--r--drivers/acpi/dispatcher/dsfield.c13
-rw-r--r--drivers/acpi/dispatcher/dsinit.c6
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c232
-rw-r--r--drivers/acpi/dispatcher/dsmthdat.c43
-rw-r--r--drivers/acpi/dispatcher/dsobject.c25
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c63
-rw-r--r--drivers/acpi/dispatcher/dsutils.c25
-rw-r--r--drivers/acpi/dispatcher/dswexec.c26
-rw-r--r--drivers/acpi/dispatcher/dswload.c67
-rw-r--r--drivers/acpi/dispatcher/dswscope.c10
-rw-r--r--drivers/acpi/dispatcher/dswstate.c72
-rw-r--r--drivers/acpi/ec.c72
-rw-r--r--drivers/acpi/events/evevent.c10
-rw-r--r--drivers/acpi/events/evgpe.c85
-rw-r--r--drivers/acpi/events/evgpeblk.c96
-rw-r--r--drivers/acpi/events/evmisc.c41
-rw-r--r--drivers/acpi/events/evregion.c115
-rw-r--r--drivers/acpi/events/evrgnini.c48
-rw-r--r--drivers/acpi/events/evsci.c8
-rw-r--r--drivers/acpi/events/evxface.c49
-rw-r--r--drivers/acpi/events/evxfevnt.c67
-rw-r--r--drivers/acpi/events/evxfregn.c15
-rw-r--r--drivers/acpi/executer/exconfig.c54
-rw-r--r--drivers/acpi/executer/exconvrt.c12
-rw-r--r--drivers/acpi/executer/excreate.c25
-rw-r--r--drivers/acpi/executer/exdump.c36
-rw-r--r--drivers/acpi/executer/exfield.c18
-rw-r--r--drivers/acpi/executer/exfldio.c67
-rw-r--r--drivers/acpi/executer/exmisc.c25
-rw-r--r--drivers/acpi/executer/exmutex.c15
-rw-r--r--drivers/acpi/executer/exnames.c28
-rw-r--r--drivers/acpi/executer/exoparg1.c101
-rw-r--r--drivers/acpi/executer/exoparg2.c89
-rw-r--r--drivers/acpi/executer/exoparg3.c17
-rw-r--r--drivers/acpi/executer/exoparg6.c3
-rw-r--r--drivers/acpi/executer/exprep.c45
-rw-r--r--drivers/acpi/executer/exregion.c40
-rw-r--r--drivers/acpi/executer/exresnte.c17
-rw-r--r--drivers/acpi/executer/exresolv.c77
-rw-r--r--drivers/acpi/executer/exresop.c12
-rw-r--r--drivers/acpi/executer/exstore.c15
-rw-r--r--drivers/acpi/executer/exstoren.c7
-rw-r--r--drivers/acpi/executer/exstorob.c17
-rw-r--r--drivers/acpi/executer/exsystem.c12
-rw-r--r--drivers/acpi/executer/exutils.c17
-rw-r--r--drivers/acpi/fan.c40
-rw-r--r--drivers/acpi/hardware/hwacpi.c6
-rw-r--r--drivers/acpi/hardware/hwgpe.c8
-rw-r--r--drivers/acpi/hardware/hwregs.c146
-rw-r--r--drivers/acpi/hardware/hwsleep.c31
-rw-r--r--drivers/acpi/hardware/hwtimer.c20
-rw-r--r--drivers/acpi/hotkey.c2
-rw-r--r--drivers/acpi/ibm_acpi.c70
-rw-r--r--drivers/acpi/motherboard.c61
-rw-r--r--drivers/acpi/namespace/nsaccess.c46
-rw-r--r--drivers/acpi/namespace/nsalloc.c118
-rw-r--r--drivers/acpi/namespace/nsdump.c15
-rw-r--r--drivers/acpi/namespace/nsdumpdv.c6
-rw-r--r--drivers/acpi/namespace/nseval.c484
-rw-r--r--drivers/acpi/namespace/nsinit.c298
-rw-r--r--drivers/acpi/namespace/nsload.c27
-rw-r--r--drivers/acpi/namespace/nsnames.c14
-rw-r--r--drivers/acpi/namespace/nsobject.c15
-rw-r--r--drivers/acpi/namespace/nsparse.c6
-rw-r--r--drivers/acpi/namespace/nssearch.c148
-rw-r--r--drivers/acpi/namespace/nsutils.c104
-rw-r--r--drivers/acpi/namespace/nswalk.c6
-rw-r--r--drivers/acpi/namespace/nsxfeval.c203
-rw-r--r--drivers/acpi/namespace/nsxfname.c22
-rw-r--r--drivers/acpi/namespace/nsxfobj.c11
-rw-r--r--drivers/acpi/numa.c48
-rw-r--r--drivers/acpi/osl.c159
-rw-r--r--drivers/acpi/parser/psargs.c25
-rw-r--r--drivers/acpi/parser/psloop.c25
-rw-r--r--drivers/acpi/parser/psopcode.c6
-rw-r--r--drivers/acpi/parser/psparse.c35
-rw-r--r--drivers/acpi/parser/psscope.c17
-rw-r--r--drivers/acpi/parser/pstree.c8
-rw-r--r--drivers/acpi/parser/psutils.c5
-rw-r--r--drivers/acpi/parser/pswalk.c5
-rw-r--r--drivers/acpi/parser/psxface.c46
-rw-r--r--drivers/acpi/pci_link.c25
-rw-r--r--drivers/acpi/processor_core.c16
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/processor_perflib.c247
-rw-r--r--drivers/acpi/resources/rscalc.c108
-rw-r--r--drivers/acpi/resources/rscreate.c33
-rw-r--r--drivers/acpi/resources/rsdump.c42
-rw-r--r--drivers/acpi/resources/rsinfo.c1
-rw-r--r--drivers/acpi/resources/rslist.c102
-rw-r--r--drivers/acpi/resources/rsmisc.c12
-rw-r--r--drivers/acpi/resources/rsutils.c155
-rw-r--r--drivers/acpi/resources/rsxface.c395
-rw-r--r--drivers/acpi/scan.c178
-rw-r--r--drivers/acpi/sleep/main.c8
-rw-r--r--drivers/acpi/sleep/wakeup.c3
-rw-r--r--drivers/acpi/system.c6
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/tables/tbconvrt.c46
-rw-r--r--drivers/acpi/tables/tbget.c65
-rw-r--r--drivers/acpi/tables/tbgetall.c11
-rw-r--r--drivers/acpi/tables/tbinstal.c50
-rw-r--r--drivers/acpi/tables/tbrsdt.c46
-rw-r--r--drivers/acpi/tables/tbutils.c149
-rw-r--r--drivers/acpi/tables/tbxface.c42
-rw-r--r--drivers/acpi/tables/tbxfroot.c82
-rw-r--r--drivers/acpi/thermal.c25
-rw-r--r--drivers/acpi/utilities/utalloc.c634
-rw-r--r--drivers/acpi/utilities/utcache.c18
-rw-r--r--drivers/acpi/utilities/utcopy.c41
-rw-r--r--drivers/acpi/utilities/utdebug.c65
-rw-r--r--drivers/acpi/utilities/utdelete.c62
-rw-r--r--drivers/acpi/utilities/uteval.c141
-rw-r--r--drivers/acpi/utilities/utglobal.c62
-rw-r--r--drivers/acpi/utilities/utinit.c26
-rw-r--r--drivers/acpi/utilities/utmath.c8
-rw-r--r--drivers/acpi/utilities/utmisc.c293
-rw-r--r--drivers/acpi/utilities/utmutex.c42
-rw-r--r--drivers/acpi/utilities/utobject.c23
-rw-r--r--drivers/acpi/utilities/utresrc.c256
-rw-r--r--drivers/acpi/utilities/utstate.c36
-rw-r--r--drivers/acpi/utilities/utxface.c44
-rw-r--r--drivers/acpi/utils.c2
-rw-r--r--drivers/acpi/video.c23
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/char/agp/hp-agp.c2
-rw-r--r--drivers/char/hpet.c5
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c25
-rw-r--r--drivers/char/pcmcia/synclink_cs.c54
-rw-r--r--drivers/char/sonypi.c10
-rw-r--r--drivers/connector/cn_proc.c1
-rw-r--r--drivers/connector/connector.c7
-rw-r--r--drivers/ide/ide-cd.c8
-rw-r--r--drivers/ide/ide-dma.c6
-rw-r--r--drivers/ide/ide-floppy.c12
-rw-r--r--drivers/ide/ide-iops.c12
-rw-r--r--drivers/ide/ide-lib.c3
-rw-r--r--drivers/ide/ide-taskfile.c3
-rw-r--r--drivers/ide/ide.c4
-rw-r--r--drivers/ide/legacy/q40ide.c1
-rw-r--r--drivers/ide/pci/sgiioc4.c6
-rw-r--r--drivers/ide/pci/trm290.c3
-rw-r--r--drivers/ieee1394/Kconfig2
-rw-r--r--drivers/infiniband/core/uverbs_main.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c13
-rw-r--r--drivers/input/evdev.c2
-rw-r--r--drivers/input/keyboard/amikbd.c34
-rw-r--r--drivers/isdn/capi/capifs.c6
-rw-r--r--drivers/isdn/i4l/isdn_common.c8
-rw-r--r--drivers/isdn/sc/ioctl.c1
-rw-r--r--drivers/leds/Kconfig6
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-ams-delta.c162
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/md/raid6algos.c7
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c7
-rw-r--r--drivers/oprofile/oprofilefs.c6
-rw-r--r--drivers/parport/Kconfig5
-rw-r--r--drivers/parport/parport_arc.c139
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c199
-rw-r--r--drivers/scsi/53c700.h6
-rw-r--r--drivers/scsi/aacraid/aachba.c15
-rw-r--r--drivers/scsi/aacraid/commsup.c6
-rw-r--r--drivers/scsi/arm/queue.c6
-rw-r--r--drivers/scsi/ibmmca.c3
-rw-r--r--drivers/scsi/ide-scsi.c6
-rw-r--r--drivers/scsi/megaraid.c2
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/wd33c93.c4
-rw-r--r--drivers/serial/ioc4_serial.c9
-rw-r--r--drivers/sn/ioc4.c64
-rw-r--r--drivers/usb/core/inode.c6
-rw-r--r--drivers/usb/gadget/inode.c6
-rw-r--r--drivers/usb/host/Kconfig3
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/w1/Kconfig1
-rw-r--r--fs/9p/vfs_super.c21
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/adfs/super.c15
-rw-r--r--fs/affs/super.c12
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/mntpt.c11
-rw-r--r--fs/afs/super.c24
-rw-r--r--fs/aio.c4
-rw-r--r--fs/autofs/init.c6
-rw-r--r--fs/autofs4/init.c6
-rw-r--r--fs/befs/linuxvfs.c12
-rw-r--r--fs/bfs/inode.c9
-rw-r--r--fs/binfmt_elf.c347
-rw-r--r--fs/binfmt_elf_fdpic.c26
-rw-r--r--fs/binfmt_misc.c6
-rw-r--r--fs/block_dev.c6
-rw-r--r--fs/cifs/cifsfs.c13
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/file.c26
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/coda/inode.c12
-rw-r--r--fs/coda/upcall.c4
-rw-r--r--fs/compat.c8
-rw-r--r--fs/configfs/mount.c6
-rw-r--r--fs/cramfs/inode.c15
-rw-r--r--fs/dcache.c40
-rw-r--r--fs/debugfs/inode.c8
-rw-r--r--fs/devfs/base.c8
-rw-r--r--fs/devpts/inode.c6
-rw-r--r--fs/efs/super.c12
-rw-r--r--fs/eventpoll.c13
-rw-r--r--fs/ext2/dir.c3
-rw-r--r--fs/ext2/super.c36
-rw-r--r--fs/ext3/super.c70
-rw-r--r--fs/fat/inode.c8
-rw-r--r--fs/fat/misc.c1
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/freevxfs/vxfs_subr.c3
-rw-r--r--fs/freevxfs/vxfs_super.c20
-rw-r--r--fs/fs-writeback.c4
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/fuse/inode.c11
-rw-r--r--fs/hfs/bnode.c2
-rw-r--r--fs/hfs/btree.c2
-rw-r--r--fs/hfs/super.c11
-rw-r--r--fs/hfsplus/bitmap.c15
-rw-r--r--fs/hfsplus/bnode.c2
-rw-r--r--fs/hfsplus/btree.c2
-rw-r--r--fs/hfsplus/super.c12
-rw-r--r--fs/hostfs/hostfs_kern.c12
-rw-r--r--fs/hpfs/super.c10
-rw-r--r--fs/hppfs/hppfs_kern.c10
-rw-r--r--fs/hugetlbfs/inode.c31
-rw-r--r--fs/inotify_user.c6
-rw-r--r--fs/ioprio.c6
-rw-r--r--fs/isofs/inode.c13
-rw-r--r--fs/jbd/checkpoint.c419
-rw-r--r--fs/jbd/commit.c21
-rw-r--r--fs/jbd/transaction.c12
-rw-r--r--fs/jffs/inode-v23.c11
-rw-r--r--fs/jffs2/fs.c4
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/super.c49
-rw-r--r--fs/jfs/jfs_metapage.c5
-rw-r--r--fs/jfs/super.c11
-rw-r--r--fs/libfs.c16
-rw-r--r--fs/locks.c66
-rw-r--r--fs/minix/dir.c3
-rw-r--r--fs/minix/inode.c17
-rw-r--r--fs/mpage.c22
-rw-r--r--fs/msdos/namei.c9
-rw-r--r--fs/namei.c3
-rw-r--r--fs/namespace.c9
-rw-r--r--fs/ncpfs/inode.c11
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/inode.c101
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/nfsd/nfsctl.c6
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/ntfs/aops.h3
-rw-r--r--fs/ntfs/attrib.c6
-rw-r--r--fs/ntfs/file.c3
-rw-r--r--fs/ntfs/super.c14
-rw-r--r--fs/ocfs2/dlm/dlmfs.c6
-rw-r--r--fs/ocfs2/super.c22
-rw-r--r--fs/ocfs2/symlink.c3
-rw-r--r--fs/open.c28
-rw-r--r--fs/openpromfs/inode.c6
-rw-r--r--fs/partitions/check.c8
-rw-r--r--fs/pipe.c9
-rw-r--r--fs/proc/root.c6
-rw-r--r--fs/qnx4/inode.c13
-rw-r--r--fs/ramfs/inode.c13
-rw-r--r--fs/reiserfs/super.c17
-rw-r--r--fs/reiserfs/xattr.c3
-rw-r--r--fs/romfs/inode.c11
-rw-r--r--fs/select.c83
-rw-r--r--fs/smbfs/inode.c12
-rw-r--r--fs/smbfs/proc.c4
-rw-r--r--fs/smbfs/proto.h2
-rw-r--r--fs/super.c111
-rw-r--r--fs/sync.c2
-rw-r--r--fs/sysfs/mount.c6
-rw-r--r--fs/sysv/dir.c3
-rw-r--r--fs/sysv/inode.c3
-rw-r--r--fs/sysv/super.c13
-rw-r--r--fs/udf/super.c12
-rw-r--r--fs/ufs/super.c9
-rw-r--r--fs/vfat/namei.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_stats.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c13
-rw-r--r--fs/xfs/linux-2.6/xfs_sysctl.c2
-rw-r--r--include/acpi/acconfig.h18
-rw-r--r--include/acpi/acdisasm.h173
-rw-r--r--include/acpi/acdispat.h6
-rw-r--r--include/acpi/acevents.h4
-rw-r--r--include/acpi/acexcep.h6
-rw-r--r--include/acpi/acglobal.h29
-rw-r--r--include/acpi/aclocal.h284
-rw-r--r--include/acpi/acmacros.h81
-rw-r--r--include/acpi/acnamesp.h28
-rw-r--r--include/acpi/acobject.h188
-rw-r--r--include/acpi/acopcode.h2
-rw-r--r--include/acpi/acoutput.h10
-rw-r--r--include/acpi/acparser.h4
-rw-r--r--include/acpi/acpi_bus.h10
-rw-r--r--include/acpi/acpi_numa.h23
-rw-r--r--include/acpi/acpiosxf.h35
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/acresrc.h20
-rw-r--r--include/acpi/acstruct.h121
-rw-r--r--include/acpi/actables.h6
-rw-r--r--include/acpi/actbl.h400
-rw-r--r--include/acpi/actbl1.h639
-rw-r--r--include/acpi/actbl2.h230
-rw-r--r--include/acpi/actypes.h88
-rw-r--r--include/acpi/acutils.h95
-rw-r--r--include/acpi/amlcode.h6
-rw-r--r--include/acpi/amlresrc.h85
-rw-r--r--include/acpi/pdc_intel.h5
-rw-r--r--include/acpi/platform/acenv.h47
-rw-r--r--include/acpi/platform/aclinux.h23
-rw-r--r--include/acpi/processor.h27
-rw-r--r--include/asm-alpha/irq.h4
-rw-r--r--include/asm-arm/irq.h4
-rw-r--r--include/asm-arm26/irq.h4
-rw-r--r--include/asm-frv/atomic.h4
-rw-r--r--include/asm-frv/checksum.h2
-rw-r--r--include/asm-frv/highmem.h2
-rw-r--r--include/asm-frv/io.h40
-rw-r--r--include/asm-frv/mb-regs.h27
-rw-r--r--include/asm-frv/signal.h6
-rw-r--r--include/asm-frv/uaccess.h64
-rw-r--r--include/asm-frv/unistd.h19
-rw-r--r--include/asm-generic/memory_model.h27
-rw-r--r--include/asm-h8300/irq.h4
-rw-r--r--include/asm-i386/alternative.h2
-rw-r--r--include/asm-i386/apic.h2
-rw-r--r--include/asm-i386/apicdef.h1
-rw-r--r--include/asm-i386/cpufeature.h12
-rw-r--r--include/asm-i386/mce.h5
-rw-r--r--include/asm-i386/mtrr.h4
-rw-r--r--include/asm-i386/processor.h14
-rw-r--r--include/asm-i386/uaccess.h35
-rw-r--r--include/asm-i386/unistd.h3
-rw-r--r--include/asm-ia64/unistd.h2
-rw-r--r--include/asm-m68k/irq.h4
-rw-r--r--include/asm-m68k/processor.h8
-rw-r--r--include/asm-m68k/uaccess.h1084
-rw-r--r--include/asm-m68k/unistd.h39
-rw-r--r--include/asm-m68knommu/irq.h4
-rw-r--r--include/asm-m68knommu/ptrace.h2
-rw-r--r--include/asm-mips/mach-au1x00/au1xxx_psc.h9
-rw-r--r--include/asm-mips/mach-db1x00/db1x00.h12
-rw-r--r--include/asm-mips/mmzone.h1
-rw-r--r--include/asm-parisc/mmzone.h5
-rw-r--r--include/asm-s390/irq.h4
-rw-r--r--include/asm-sparc/irq.h4
-rw-r--r--include/asm-v850/irq.h2
-rw-r--r--include/asm-x86_64/acpi.h2
-rw-r--r--include/asm-x86_64/apicdef.h2
-rw-r--r--include/asm-x86_64/mmzone.h1
-rw-r--r--include/asm-x86_64/numa.h1
-rw-r--r--include/asm-x86_64/unistd.h4
-rw-r--r--include/asm-xtensa/checksum.h3
-rw-r--r--include/asm-xtensa/rwsem.h7
-rw-r--r--include/asm-xtensa/uaccess.h34
-rw-r--r--include/linux/acpi.h9
-rw-r--r--include/linux/bootmem.h4
-rw-r--r--include/linux/cn_proc.h21
-rw-r--r--include/linux/coda_linux.h2
-rw-r--r--include/linux/coda_psdev.h2
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/fs.h41
-rw-r--r--include/linux/genalloc.h35
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/interrupt.h21
-rw-r--r--include/linux/ioc4.h5
-rw-r--r--include/linux/irq.h11
-rw-r--r--include/linux/irqreturn.h25
-rw-r--r--include/linux/jbd.h8
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/kexec.h1
-rw-r--r--include/linux/list.h23
-rw-r--r--include/linux/migrate.h20
-rw-r--r--include/linux/mm.h11
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/mount.h5
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/pagemap.h7
-rw-r--r--include/linux/percpu_counter.h38
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/radix-tree.h5
-rw-r--r--include/linux/ramfs.h4
-rw-r--r--include/linux/rcupdate.h3
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/security.h45
-rw-r--r--include/linux/slab.h50
-rw-r--r--include/linux/string.h1
-rw-r--r--include/linux/suspend.h1
-rw-r--r--include/linux/swap.h101
-rw-r--r--include/linux/swapops.h53
-rw-r--r--include/linux/syscalls.h10
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/uaccess.h22
-rw-r--r--include/linux/vmalloc.h8
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/zorro.h42
-rw-r--r--ipc/mqueue.c10
-rw-r--r--ipc/shm.c3
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/auditsc.c1
-rw-r--r--kernel/compat.c23
-rw-r--r--kernel/cpuset.c16
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c5
-rw-r--r--kernel/futex.c8
-rw-r--r--kernel/irq/handle.c5
-rw-r--r--kernel/irq/migration.c4
-rw-r--r--kernel/irq/proc.c3
-rw-r--r--kernel/irq/spurious.c12
-rw-r--r--kernel/kexec.c6
-rw-r--r--kernel/ksysfs.c19
-rw-r--r--kernel/power/power.h6
-rw-r--r--kernel/power/snapshot.c260
-rw-r--r--kernel/power/swsusp.c32
-rw-r--r--kernel/rcupdate.c13
-rw-r--r--kernel/sched.c12
-rw-r--r--kernel/sys.c1
-rw-r--r--kernel/sys_ni.c2
-rw-r--r--kernel/sysctl.c11
-rw-r--r--kernel/timer.c30
-rw-r--r--kernel/workqueue.c4
-rw-r--r--lib/Makefile1
-rw-r--r--lib/genalloc.c263
-rw-r--r--lib/percpu_counter.c46
-rw-r--r--lib/radix-tree.c197
-rw-r--r--lib/string.c30
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/filemap.c183
-rw-r--r--mm/filemap.h6
-rw-r--r--mm/fremap.c9
-rw-r--r--mm/hugetlb.c282
-rw-r--r--mm/memory.c125
-rw-r--r--mm/memory_hotplug.c27
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/migrate.c1058
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/mprotect.c37
-rw-r--r--mm/oom_kill.c9
-rw-r--r--mm/page-writeback.c3
-rw-r--r--mm/page_alloc.c184
-rw-r--r--mm/pdflush.c3
-rw-r--r--mm/rmap.c107
-rw-r--r--mm/shmem.c18
-rw-r--r--mm/slab.c249
-rw-r--r--mm/sparse.c22
-rw-r--r--mm/swap.c42
-rw-r--r--mm/swapfile.c43
-rw-r--r--mm/truncate.c22
-rw-r--r--mm/vmalloc.c122
-rw-r--r--mm/vmscan.c240
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/link_watch.c5
-rw-r--r--net/dccp/Kconfig2
-rw-r--r--net/socket.c7
-rw-r--r--net/sunrpc/rpc_pipe.c6
-rw-r--r--security/dummy.c14
-rw-r--r--security/inode.c8
-rw-r--r--security/selinux/hooks.c18
-rw-r--r--security/selinux/selinuxfs.c7
-rw-r--r--sound/oss/au1550_ac97.c2
-rw-r--r--sound/oss/cs46xx.c1272
594 files changed, 14635 insertions, 10490 deletions
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index ce5d2c038cf..6d2412ec91e 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -155,7 +155,83 @@ problem, which is called the function-growth-hormone-imbalance syndrome.
See next chapter.
- Chapter 5: Functions
+ Chapter 5: Typedefs
+
+Please don't use things like "vps_t".
+
+It's a _mistake_ to use typedef for structures and pointers. When you see a
+
+ vps_t a;
+
+in the source, what does it mean?
+
+In contrast, if it says
+
+ struct virtual_container *a;
+
+you can actually tell what "a" is.
+
+Lots of people think that typedefs "help readability". Not so. They are
+useful only for:
+
+ (a) totally opaque objects (where the typedef is actively used to _hide_
+ what the object is).
+
+ Example: "pte_t" etc. opaque objects that you can only access using
+ the proper accessor functions.
+
+ NOTE! Opaqueness and "accessor functions" are not good in themselves.
+ The reason we have them for things like pte_t etc. is that there
+ really is absolutely _zero_ portably accessible information there.
+
+ (b) Clear integer types, where the abstraction _helps_ avoid confusion
+ whether it is "int" or "long".
+
+ u8/u16/u32 are perfectly fine typedefs, although they fit into
+ category (d) better than here.
+
+ NOTE! Again - there needs to be a _reason_ for this. If something is
+ "unsigned long", then there's no reason to do
+
+ typedef unsigned long myflags_t;
+
+ but if there is a clear reason for why it under certain circumstances
+ might be an "unsigned int" and under other configurations might be
+ "unsigned long", then by all means go ahead and use a typedef.
+
+ (c) when you use sparse to literally create a _new_ type for
+ type-checking.
+
+ (d) New types which are identical to standard C99 types, in certain
+ exceptional circumstances.
+
+ Although it would only take a short amount of time for the eyes and
+ brain to become accustomed to the standard types like 'uint32_t',
+ some people object to their use anyway.
+
+ Therefore, the Linux-specific 'u8/u16/u32/u64' types and their
+ signed equivalents which are identical to standard types are
+ permitted -- although they are not mandatory in new code of your
+ own.
+
+ When editing existing code which already uses one or the other set
+ of types, you should conform to the existing choices in that code.
+
+ (e) Types safe for use in userspace.
+
+ In certain structures which are visible to userspace, we cannot
+ require C99 types and cannot use the 'u32' form above. Thus, we
+ use __u32 and similar types in all structures which are shared
+ with userspace.
+
+Maybe there are other cases too, but the rule should basically be to NEVER
+EVER use a typedef unless you can clearly match one of those rules.
+
+In general, a pointer, or a struct that has elements that can reasonably
+be directly accessed should _never_ be a typedef.
+
+
+ Chapter 6: Functions
Functions should be short and sweet, and do just one thing. They should
fit on one or two screenfuls of text (the ISO/ANSI screen size is 80x24,
@@ -183,7 +259,7 @@ and it gets confused. You know you're brilliant, but maybe you'd like
to understand what you did 2 weeks from now.
- Chapter 6: Centralized exiting of functions
+ Chapter 7: Centralized exiting of functions
Albeit deprecated by some people, the equivalent of the goto statement is
used frequently by compilers in form of the unconditional jump instruction.
@@ -220,7 +296,7 @@ out:
return result;
}
- Chapter 7: Commenting
+ Chapter 8: Commenting
Comments are good, but there is also a danger of over-commenting. NEVER
try to explain HOW your code works in a comment: it's much better to
@@ -240,7 +316,7 @@ When commenting the kernel API functions, please use the kerneldoc format.
See the files Documentation/kernel-doc-nano-HOWTO.txt and scripts/kernel-doc
for details.
- Chapter 8: You've made a mess of it
+ Chapter 9: You've made a mess of it
That's OK, we all do. You've probably been told by your long-time Unix
user helper that "GNU emacs" automatically formats the C sources for
@@ -288,7 +364,7 @@ re-formatting you may want to take a look at the man page. But
remember: "indent" is not a fix for bad programming.
- Chapter 9: Configuration-files
+ Chapter 10: Configuration-files
For configuration options (arch/xxx/Kconfig, and all the Kconfig files),
somewhat different indentation is used.
@@ -313,7 +389,7 @@ support for file-systems, for instance) should be denoted (DANGEROUS), other
experimental options should be denoted (EXPERIMENTAL).
- Chapter 10: Data structures
+ Chapter 11: Data structures
Data structures that have visibility outside the single-threaded
environment they are created and destroyed in should always have
@@ -344,7 +420,7 @@ Remember: if another thread can find your data structure, and you don't
have a reference count on it, you almost certainly have a bug.
- Chapter 11: Macros, Enums and RTL
+ Chapter 12: Macros, Enums and RTL
Names of macros defining constants and labels in enums are capitalized.
@@ -399,7 +475,7 @@ The cpp manual deals with macros exhaustively. The gcc internals manual also
covers RTL which is used frequently with assembly language in the kernel.
- Chapter 12: Printing kernel messages
+ Chapter 13: Printing kernel messages
Kernel developers like to be seen as literate. Do mind the spelling
of kernel messages to make a good impression. Do not use crippled
@@ -410,7 +486,7 @@ Kernel messages do not have to be terminated with a period.
Printing numbers in parentheses (%d) adds no value and should be avoided.
- Chapter 13: Allocating memory
+ Chapter 14: Allocating memory
The kernel provides the following general purpose memory allocators:
kmalloc(), kzalloc(), kcalloc(), and vmalloc(). Please refer to the API
@@ -429,7 +505,7 @@ from void pointer to any other pointer type is guaranteed by the C programming
language.
- Chapter 14: The inline disease
+ Chapter 15: The inline disease
There appears to be a common misperception that gcc has a magic "make me
faster" speedup option called "inline". While the use of inlines can be
@@ -457,7 +533,7 @@ something it would have done anyway.
- Chapter 15: References
+ Appendix I: References
The C Programming Language, Second Edition
by Brian W. Kernighan and Dennis M. Ritchie.
@@ -481,4 +557,4 @@ Kernel CodingStyle, by greg@kroah.com at OLS 2002:
http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
--
-Last updated on 30 December 2005 by a community effort on LKML.
+Last updated on 30 April 2006.
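The typedef guidance added to Chapter 5 above is easiest to see side by side. The following standalone C sketch is illustrative only and not part of the patch; the names virtual_container and cookie_t are invented for the example. It contrasts a plain struct pointer (preferred) with a typedef that is justified because the object is genuinely opaque, i.e. category (a) above.

#include <stdio.h>

/* Preferred: naming the struct directly makes it obvious what "a" is. */
struct virtual_container {
	int id;
};

/* Acceptable: a truly opaque handle, only touched via accessor functions. */
typedef struct {
	unsigned long raw;
} cookie_t;

static unsigned long cookie_value(cookie_t c)
{
	return c.raw;
}

int main(void)
{
	struct virtual_container vc = { .id = 42 };
	struct virtual_container *a = &vc;	/* clearly a struct pointer */
	cookie_t c = { .raw = 7 };		/* opaque object, category (a) */

	printf("container %d, cookie %lu\n", a->id, cookie_value(c));
	return 0;
}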
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index ca02e04a906..31b727ceb12 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -117,6 +117,7 @@ X!Ilib/string.c
<chapter id="mm">
<title>Memory Management in Linux</title>
<sect1><title>The Slab Cache</title>
+!Iinclude/linux/slab.h
!Emm/slab.c
</sect1>
<sect1><title>User Space Memory Access</title>
@@ -331,6 +332,18 @@ X!Earch/i386/kernel/mca.c
!Esecurity/security.c
</chapter>
+ <chapter id="audit">
+ <title>Audit Interfaces</title>
+!Ekernel/audit.c
+!Ikernel/auditsc.c
+!Ikernel/auditfilter.c
+ </chapter>
+
+ <chapter id="accounting">
+ <title>Accounting Framework</title>
+!Ikernel/acct.c
+ </chapter>
+
<chapter id="pmfuncs">
<title>Power Management</title>
!Ekernel/power/pm.c
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 07cb93b82ba..6e459420ee9 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -790,7 +790,6 @@ RCU pointer update:
RCU grace period:
- synchronize_kernel (deprecated)
synchronize_net
synchronize_sched
synchronize_rcu
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
new file mode 100644
index 00000000000..8230098da52
--- /dev/null
+++ b/Documentation/SubmitChecklist
@@ -0,0 +1,57 @@
+Linux Kernel patch submittal checklist
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here are some basic things that developers should do if they
+want to see their kernel patch submittals accepted quicker.
+
+These are all above and beyond the documentation that is provided
+in Documentation/SubmittingPatches and elsewhere about submitting
+Linux kernel patches.
+
+
+
+- Builds cleanly with applicable or modified CONFIG options =y, =m, and =n.
+ No gcc warnings/errors, no linker warnings/errors.
+
+- Passes allnoconfig, allmodconfig
+
+- Builds on multiple CPU arch-es by using local cross-compile tools
+ or something like PLM at OSDL.
+
+- ppc64 is a good architecture for cross-compilation checking because it
+ tends to use `unsigned long' for 64-bit quantities.
+
+- Matches kernel coding style(!)
+
+- Any new or modified CONFIG options don't muck up the config menu.
+
+- All new Kconfig options have help text.
+
+- Has been carefully reviewed with respect to relevant Kconfig
+ combinations. This is very hard to get right with testing --
+ brainpower pays off here.
+
+- Checks cleanly with sparse.
+
+- Use 'make checkstack' and 'make namespacecheck' and fix any
+ problems that they find. Note: checkstack does not point out
+ problems explicitly, but any one function that uses more than
+ 512 bytes on the stack is a candidate for change.
+
+- Include kernel-doc to document global kernel APIs. (Not required
+ for static functions, but OK there also.) Use 'make htmldocs'
+ or 'make mandocs' to check the kernel-doc and fix any issues.
+
+- Has been tested with CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT,
+ CONFIG_DEBUG_SLAB, CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES,
+ CONFIG_DEBUG_SPINLOCK, CONFIG_DEBUG_SPINLOCK_SLEEP all simultaneously
+ enabled.
+
+- Has been build- and runtime tested with and without CONFIG_SMP and
+ CONFIG_PREEMPT.
+
+- If the patch affects IO/Disk, etc: has been tested with and without
+ CONFIG_LBD.
+
+
+2006-APR-27
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index b369a8c46a7..b2f593fc76c 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -3,7 +3,7 @@
Maintained by Torben Mathiasen <device@lanana.org>
- Last revised: 25 January 2005
+ Last revised: 01 March 2006
This list is the Linux Device List, the official registry of allocated
device numbers and /dev directory nodes for the Linux operating
@@ -94,7 +94,6 @@ Your cooperation is appreciated.
9 = /dev/urandom Faster, less secure random number gen.
10 = /dev/aio Asyncronous I/O notification interface
11 = /dev/kmsg Writes to this come out as printk's
- 12 = /dev/oldmem Access to crash dump from kexec kernel
1 block RAM disk
0 = /dev/ram0 First RAM disk
1 = /dev/ram1 Second RAM disk
@@ -262,13 +261,13 @@ Your cooperation is appreciated.
NOTE: These devices permit both read and write access.
7 block Loopback devices
- 0 = /dev/loop0 First loopback device
- 1 = /dev/loop1 Second loopback device
+ 0 = /dev/loop0 First loop device
+ 1 = /dev/loop1 Second loop device
...
- The loopback devices are used to mount filesystems not
+ The loop devices are used to mount filesystems not
associated with block devices. The binding to the
- loopback devices is handled by mount(8) or losetup(8).
+ loop devices is handled by mount(8) or losetup(8).
8 block SCSI disk devices (0-15)
0 = /dev/sda First SCSI disk whole disk
@@ -943,7 +942,7 @@ Your cooperation is appreciated.
240 = /dev/ftlp FTL on 16th Memory Technology Device
Partitions are handled in the same way as for IDE
- disks (see major number 3) expect that the partition
+ disks (see major number 3) except that the partition
limit is 15 rather than 63 per disk (same as SCSI.)
45 char isdn4linux ISDN BRI driver
@@ -1168,7 +1167,7 @@ Your cooperation is appreciated.
The filename of the encrypted container and the passwords
are sent via ioctls (using the sdmount tool) to the master
node which then activates them via one of the
- /dev/scramdisk/x nodes for loopback mounting (all handled
+ /dev/scramdisk/x nodes for loop mounting (all handled
through the sdmount tool).
Requested by: andy@scramdisklinux.org
@@ -2538,18 +2537,32 @@ Your cooperation is appreciated.
0 = /dev/usb/lp0 First USB printer
...
15 = /dev/usb/lp15 16th USB printer
- 16 = /dev/usb/mouse0 First USB mouse
- ...
- 31 = /dev/usb/mouse15 16th USB mouse
- 32 = /dev/usb/ez0 First USB firmware loader
- ...
- 47 = /dev/usb/ez15 16th USB firmware loader
48 = /dev/usb/scanner0 First USB scanner
...
63 = /dev/usb/scanner15 16th USB scanner
64 = /dev/usb/rio500 Diamond Rio 500
65 = /dev/usb/usblcd USBLCD Interface (info@usblcd.de)
66 = /dev/usb/cpad0 Synaptics cPad (mouse/LCD)
+ 96 = /dev/usb/hiddev0 1st USB HID device
+ ...
+ 111 = /dev/usb/hiddev15 16th USB HID device
+ 112 = /dev/usb/auer0 1st auerswald ISDN device
+ ...
+ 127 = /dev/usb/auer15 16th auerswald ISDN device
+ 128 = /dev/usb/brlvgr0 First Braille Voyager device
+ ...
+ 131 = /dev/usb/brlvgr3 Fourth Braille Voyager device
+ 132 = /dev/usb/idmouse ID Mouse (fingerprint scanner) device
+ 133 = /dev/usb/sisusbvga1 First SiSUSB VGA device
+ ...
+		140 = /dev/usb/sisusbvga8	Eighth SiSUSB VGA device
+ 144 = /dev/usb/lcd USB LCD device
+ 160 = /dev/usb/legousbtower0 1st USB Legotower device
+ ...
+ 175 = /dev/usb/legousbtower15 16th USB Legotower device
+		240 = /dev/usb/dabusb0	First dabusb device
+ ...
+ 243 = /dev/usb/dabusb3 Fourth dabusb device
180 block USB block devices
0 = /dev/uba First USB block device
@@ -2710,6 +2723,17 @@ Your cooperation is appreciated.
1 = /dev/cpu/1/msr MSRs on CPU 1
...
+202 block Xen Virtual Block Device
+ 0 = /dev/xvda First Xen VBD whole disk
+ 16 = /dev/xvdb Second Xen VBD whole disk
+ 32 = /dev/xvdc Third Xen VBD whole disk
+ ...
+ 240 = /dev/xvdp Sixteenth Xen VBD whole disk
+
+ Partitions are handled in the same way as for IDE
+ disks (see major number 3) except that the limit on
+ partitions is 15.
+
203 char CPU CPUID information
0 = /dev/cpu/0/cpuid CPUID on CPU 0
1 = /dev/cpu/1/cpuid CPUID on CPU 1
@@ -2747,11 +2771,26 @@ Your cooperation is appreciated.
46 = /dev/ttyCPM0 PPC CPM (SCC or SMC) - port 0
...
47 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 5
- 50 = /dev/ttyIOC40 Altix serial card
+ 50 = /dev/ttyIOC0 Altix serial card
+ ...
+ 81 = /dev/ttyIOC31 Altix serial card
+ 82 = /dev/ttyVR0 NEC VR4100 series SIU
+ 83 = /dev/ttyVR1 NEC VR4100 series DSIU
+ 84 = /dev/ttyIOC84 Altix ioc4 serial card
+ ...
+ 115 = /dev/ttyIOC115 Altix ioc4 serial card
+ 116 = /dev/ttySIOC0 Altix ioc3 serial card
+ ...
+ 147 = /dev/ttySIOC31 Altix ioc3 serial card
+ 148 = /dev/ttyPSC0 PPC PSC - port 0
+ ...
+ 153 = /dev/ttyPSC5 PPC PSC - port 5
+ 154 = /dev/ttyAT0 ATMEL serial port 0
...
- 81 = /dev/ttyIOC431 Altix serial card
- 82 = /dev/ttyVR0 NEC VR4100 series SIU
- 83 = /dev/ttyVR1 NEC VR4100 series DSIU
+ 169 = /dev/ttyAT15 ATMEL serial port 15
+ 170 = /dev/ttyNX0 Hilscher netX serial port 0
+ ...
+ 185 = /dev/ttyNX15 Hilscher netX serial port 15
205 char Low-density serial ports (alternate device)
0 = /dev/culu0 Callout device for ttyLU0
@@ -2786,8 +2825,8 @@ Your cooperation is appreciated.
50 = /dev/cuioc40 Callout device for ttyIOC40
...
81 = /dev/cuioc431 Callout device for ttyIOC431
- 82 = /dev/cuvr0 Callout device for ttyVR0
- 83 = /dev/cuvr1 Callout device for ttyVR1
+ 82 = /dev/cuvr0 Callout device for ttyVR0
+ 83 = /dev/cuvr1 Callout device for ttyVR1
206 char OnStream SC-x0 tape devices
@@ -2897,7 +2936,6 @@ Your cooperation is appreciated.
...
196 = /dev/dvb/adapter3/video0 first video decoder of fourth card
-
216 char Bluetooth RFCOMM TTY devices
0 = /dev/rfcomm0 First Bluetooth RFCOMM TTY device
1 = /dev/rfcomm1 Second Bluetooth RFCOMM TTY device
@@ -3002,12 +3040,43 @@ Your cooperation is appreciated.
ioctl()'s can be used to rewind the tape regardless of
the device used to access it.
-231 char InfiniBand MAD
+231 char InfiniBand
0 = /dev/infiniband/umad0
1 = /dev/infiniband/umad1
- ...
+ ...
+		 63 = /dev/infiniband/umad63    63rd InfiniBand MAD device
+ 64 = /dev/infiniband/issm0 First InfiniBand IsSM device
+ 65 = /dev/infiniband/issm1 Second InfiniBand IsSM device
+ ...
+ 127 = /dev/infiniband/issm63 63rd InfiniBand IsSM device
+ 128 = /dev/infiniband/uverbs0 First InfiniBand verbs device
+ 129 = /dev/infiniband/uverbs1 Second InfiniBand verbs device
+ ...
+ 159 = /dev/infiniband/uverbs31 31st InfiniBand verbs device
+
+232 char Biometric Devices
+ 0 = /dev/biometric/sensor0/fingerprint first fingerprint sensor on first device
+ 1 = /dev/biometric/sensor0/iris first iris sensor on first device
+ 2 = /dev/biometric/sensor0/retina first retina sensor on first device
+ 3 = /dev/biometric/sensor0/voiceprint first voiceprint sensor on first device
+ 4 = /dev/biometric/sensor0/facial first facial sensor on first device
+ 5 = /dev/biometric/sensor0/hand first hand sensor on first device
+ ...
+ 10 = /dev/biometric/sensor1/fingerprint first fingerprint sensor on second device
+ ...
+ 20 = /dev/biometric/sensor2/fingerprint first fingerprint sensor on third device
+ ...
-232-239 UNASSIGNED
+233 char PathScale InfiniPath interconnect
+ 0 = /dev/ipath Primary device for programs (any unit)
+ 1 = /dev/ipath0 Access specifically to unit 0
+ 2 = /dev/ipath1 Access specifically to unit 1
+ ...
+ 4 = /dev/ipath3 Access specifically to unit 3
+ 129 = /dev/ipath_sma Device used by Subnet Management Agent
+ 130 = /dev/ipath_diag Device used by diagnostics programs
+
+234-239 UNASSIGNED
240-254 char LOCAL/EXPERIMENTAL USE
240-254 block LOCAL/EXPERIMENTAL USE
@@ -3021,6 +3090,24 @@ Your cooperation is appreciated.
This major is reserved to assist the expansion to a
larger number space. No device nodes with this major
should ever be created on the filesystem.
+		 (This is probably not true anymore, but I'll leave it
+ for now /Torben)
+
+---LARGE MAJORS!!!!!---
+
+256 char Equinox SST multi-port serial boards
+ 0 = /dev/ttyEQ0 First serial port on first Equinox SST board
+ 127 = /dev/ttyEQ127 Last serial port on first Equinox SST board
+ 128 = /dev/ttyEQ128 First serial port on second Equinox SST board
+ ...
+ 1027 = /dev/ttyEQ1027 Last serial port on eighth Equinox SST board
+
+256 block Resident Flash Disk Flash Translation Layer
+ 0 = /dev/rfda First RFD FTL layer
+ 16 = /dev/rfdb Second RFD FTL layer
+ ...
+ 240 = /dev/rfdp 16th RFD FTL layer
+
**** ADDITIONAL /dev DIRECTORY ENTRIES
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index f7293297f32..027285d0c26 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -33,21 +33,6 @@ Who: Adrian Bunk <bunk@stusta.de>
---------------------------
-What: RCU API moves to EXPORT_SYMBOL_GPL
-When: April 2006
-Files: include/linux/rcupdate.h, kernel/rcupdate.c
-Why: Outside of Linux, the only implementations of anything even
- vaguely resembling RCU that I am aware of are in DYNIX/ptx,
- VM/XA, Tornado, and K42. I do not expect anyone to port binary
- drivers or kernel modules from any of these, since the first two
- are owned by IBM and the last two are open-source research OSes.
- So these will move to GPL after a grace period to allow
- people, who might be using implementations that I am not aware
- of, to adjust to this upcoming change.
-Who: Paul E. McKenney <paulmck@us.ibm.com>
-
----------------------------
-
What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
When: November 2006
Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 1045da582b9..d31efbbdfe5 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -99,7 +99,7 @@ prototypes:
int (*sync_fs)(struct super_block *sb, int wait);
void (*write_super_lockfs) (struct super_block *);
void (*unlockfs) (struct super_block *);
- int (*statfs) (struct super_block *, struct kstatfs *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
@@ -142,15 +142,16 @@ see also dquot_operations section.
--------------------------- file_system_type ---------------------------
prototypes:
- struct super_block *(*get_sb) (struct file_system_type *, int,
- const char *, void *);
+	int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
locking rules:
may block BKL
get_sb yes yes
kill_sb yes yes
-->get_sb() returns error or a locked superblock (exclusive on ->s_umount).
+->get_sb() returns error or 0 with locked superblock attached to the vfsmount
+(exclusive on ->s_umount).
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 2f388460cbe..5531694059a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -50,10 +50,11 @@ Turn your foo_read_super() into a function that would return 0 in case of
success and negative number in case of error (-EINVAL unless you have more
informative error value to report). Call it foo_fill_super(). Now declare
-struct super_block foo_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+int foo_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, foo_fill_super,
+ mnt);
}
(or similar with s/bdev/nodev/ or s/bdev/single/, depending on the kind of
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 3a2e5520c1e..9d3aed628bc 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -113,8 +113,8 @@ members are defined:
struct file_system_type {
const char *name;
int fs_flags;
- struct super_block *(*get_sb) (struct file_system_type *, int,
- const char *, void *);
+	int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
@@ -211,7 +211,7 @@ struct super_operations {
int (*sync_fs)(struct super_block *sb, int wait);
void (*write_super_lockfs) (struct super_block *);
void (*unlockfs) (struct super_block *);
- int (*statfs) (struct super_block *, struct kstatfs *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
diff --git a/Documentation/ioctl-number.txt b/Documentation/ioctl-number.txt
index 171a44ebd93..1543802ef53 100644
--- a/Documentation/ioctl-number.txt
+++ b/Documentation/ioctl-number.txt
@@ -85,7 +85,9 @@ Code Seq# Include File Comments
<mailto:maassen@uni-freiburg.de>
'C' all linux/soundcard.h
'D' all asm-s390/dasd.h
+'E' all linux/input.h
'F' all linux/fb.h
+'H' all linux/hiddev.h
'I' all linux/isdn.h
'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a9d3a1794b2..bca6f389da6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -147,6 +147,9 @@ running once the system is up.
acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
Format: <irq>,<irq>...
+ acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
+ Format: To spoof as Windows 98: ="Microsoft Windows"
+
acpi_osi= [HW,ACPI] empty param disables _OSI
acpi_serialize [HW,ACPI] force serialization of AML methods
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index 516c5019013..823b2cf6e3d 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -350,9 +350,34 @@ Q: How do I make suspend more verbose?
A: If you want to see any non-error kernel messages on the virtual
terminal the kernel switches to during suspend, you have to set the
-kernel console loglevel to at least 5, for example by doing
-
- echo 5 > /proc/sys/kernel/printk
+kernel console loglevel to at least 4 (KERN_WARNING), for example by
+doing
+
+ # save the old loglevel
+ read LOGLEVEL DUMMY < /proc/sys/kernel/printk
+ # set the loglevel so we see the progress bar.
+ # if the level is higher than needed, we leave it alone.
+ if [ $LOGLEVEL -lt 5 ]; then
+ echo 5 > /proc/sys/kernel/printk
+ fi
+
+ IMG_SZ=0
+ read IMG_SZ < /sys/power/image_size
+ echo -n disk > /sys/power/state
+ RET=$?
+ #
+ # the logic here is:
+ # if image_size > 0 (without kernel support, IMG_SZ will be zero),
+ # then try again with image_size set to zero.
+ if [ $RET -ne 0 -a $IMG_SZ -ne 0 ]; then # try again with minimal image size
+ echo 0 > /sys/power/image_size
+ echo -n disk > /sys/power/state
+ RET=$?
+ fi
+
+ # restore previous loglevel
+ echo $LOGLEVEL > /proc/sys/kernel/printk
+ exit $RET
Q: Is this true that if I have a mounted filesystem on a USB device and
I suspend to disk, I can lose data unless the filesystem has been mounted
@@ -380,3 +405,17 @@ safest thing is to unmount all filesystems on removable media (such USB,
Firewire, CompactFlash, MMC, external SATA, or even IDE hotplug bays)
before suspending; then remount them after resuming.
+Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
+compiled with similar configuration files, yet I found that
+suspend to disk (and resume) is much slower on 2.6.16 compared to
+2.6.15. Any idea why that might happen, or how I can speed it up?
+
+A: This is because the size of the suspend image is now greater than
+it was for 2.6.15 (by saving more data we get a more responsive system
+after resume).
+
+There's the /sys/power/image_size knob that controls the size of the
+image. If you set it to 0 (eg. by echo 0 > /sys/power/image_size as
+root), the 2.6.15 behavior should be restored. If it is still too
+slow, take a look at suspend.sf.net -- userland suspend is faster and
+supports LZF compression to speed it up further.
diff --git a/Documentation/power/video.txt b/Documentation/power/video.txt
index 43a889f8f08..d859faa3a46 100644
--- a/Documentation/power/video.txt
+++ b/Documentation/power/video.txt
@@ -90,6 +90,7 @@ Table of known working notebooks:
Model hack (or "how to do it")
------------------------------------------------------------------------------
Acer Aspire 1406LC ole's late BIOS init (7), turn off DRI
+Acer TM 230 s3_bios (2)
Acer TM 242FX vbetool (6)
Acer TM C110 video_post (8)
Acer TM C300 vga=normal (only suspend on console, not in X), vbetool (6) or video_post (8)
@@ -115,6 +116,7 @@ Dell D610 vga=normal and X (possibly vbestate (6) too, but not tested)
Dell Inspiron 4000 ??? (*)
Dell Inspiron 500m ??? (*)
Dell Inspiron 510m ???
+Dell Inspiron 5150 vbetool needed (6)
Dell Inspiron 600m ??? (*)
Dell Inspiron 8200 ??? (*)
Dell Inspiron 8500 ??? (*)
@@ -125,6 +127,7 @@ HP NX7000 ??? (*)
HP Pavilion ZD7000 vbetool post needed, need open-source nv driver for X
HP Omnibook XE3 athlon version none (1)
HP Omnibook XE3GC none (1), video is S3 Savage/IX-MV
+HP Omnibook XE3L-GF vbetool (6)
HP Omnibook 5150 none (1), (S1 also works OK)
IBM TP T20, model 2647-44G none (1), video is S3 Inc. 86C270-294 Savage/IX-MV, vesafb gets "interesting" but X work.
IBM TP A31 / Type 2652-M5G s3_mode (3) [works ok with BIOS 1.04 2002-08-23, but not at all with BIOS 1.11 2004-11-05 :-(]
@@ -157,6 +160,7 @@ Sony Vaio vgn-s260 X or boot-radeon can init it (5)
Sony Vaio vgn-S580BH vga=normal, but suspend from X. Console will be blank unless you return to X.
Sony Vaio vgn-FS115B s3_bios (2),s3_mode (4)
Toshiba Libretto L5 none (1)
+Toshiba Libretto 100CT/110CT vbetool (6)
Toshiba Portege 3020CT s3_mode (3)
Toshiba Satellite 4030CDT s3_mode (3) (S1 also works OK)
Toshiba Satellite 4080XCDT s3_mode (3) (S1 also works OK)
diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt
index 3f1c5464b1c..5a311c38dd1 100644
--- a/Documentation/sparse.txt
+++ b/Documentation/sparse.txt
@@ -1,5 +1,6 @@
Copyright 2004 Linus Torvalds
Copyright 2004 Pavel Machek <pavel@suse.cz>
+Copyright 2006 Bob Copeland <me@bobcopeland.com>
Using sparse for typechecking
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -41,15 +42,8 @@ sure that bitwise types don't get mixed up (little-endian vs big-endian
vs cpu-endian vs whatever), and there the constant "0" really _is_
special.
-Use
-
- make C=[12] CF=-Wbitwise
-
-or you don't get any checking at all.
-
-
-Where to get sparse
-~~~~~~~~~~~~~~~~~~~
+Getting sparse
+~~~~~~~~~~~~~~
With git, you can just get it from
@@ -57,7 +51,7 @@ With git, you can just get it from
and DaveJ has tar-balls at
- http://www.codemonkey.org.uk/projects/git-snapshots/sparse/
+ http://www.codemonkey.org.uk/projects/git-snapshots/sparse/
Once you have it, just do
@@ -65,8 +59,20 @@ Once you have it, just do
make
make install
-as your regular user, and it will install sparse in your ~/bin directory.
-After that, doing a kernel make with "make C=1" will run sparse on all the
-C files that get recompiled, or with "make C=2" will run sparse on the
-files whether they need to be recompiled or not (ie the latter is fast way
-to check the whole tree if you have already built it).
+as a regular user, and it will install sparse in your ~/bin directory.
+
+Using sparse
+~~~~~~~~~~~~
+
+Do a kernel make with "make C=1" to run sparse on all the C files that get
+recompiled, or use "make C=2" to run sparse on the files whether they need to
+be recompiled or not. The latter is a fast way to check the whole tree if you
+have already built it.
+
+The optional make variable CF can be used to pass arguments to sparse. The
+build system passes -Wbitwise to sparse automatically. To perform endianness
+checks, you may define __CHECK_ENDIAN__:
+
+ make C=2 CF="-D__CHECK_ENDIAN__"
+
+These checks are disabled by default as they generate a host of warnings.
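To make the endianness checking above concrete, here is a small standalone file, illustrative only and not part of the patch. It mimics roughly how the kernel marks its __le32/__be32 types as sparse "bitwise" types (the real kernel definitions only become bitwise when __CHECK_ENDIAN__ is defined, as described above); outside the kernel tree the attributes have to be spelled out by hand, as done here. Running something like "sparse -Wbitwise example.c" should flag the plain assignment while accepting the accessor, and a normal gcc build is unaffected.

#ifdef __CHECKER__			/* defined when sparse does the checking */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise le32;	/* stand-in for the kernel's __le32 */

static unsigned int le32_to_cpu_example(le32 v)
{
	/* a real implementation would byte-swap on big-endian hosts */
	return (__force unsigned int)v;
}

int main(void)
{
	le32 on_disk = (__force le32)7;
	unsigned int ok  = le32_to_cpu_example(on_disk);	/* fine */
	unsigned int bad = on_disk;	/* sparse warns: different base types */

	return (int)(ok + bad);
}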
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index a46c10fcddf..2dc246af488 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -29,6 +29,7 @@ Currently, these files are in /proc/sys/vm:
- drop-caches
- zone_reclaim_mode
- zone_reclaim_interval
+- panic_on_oom
==============================================================
@@ -178,3 +179,15 @@ Time is set in seconds and set by default to 30 seconds.
Reduce the interval if undesired off node allocations occur. However, too
frequent scans will have a negative impact onoff node allocation performance.
+=============================================================
+
+panic_on_oom
+
+This enables or disables the panic-on-out-of-memory feature. If this is set
+to 1, the kernel panics when out-of-memory happens. If this is set to 0, the
+kernel invokes the OOM killer (oom_killer) to kill some rogue process, and the
+system will usually survive. If you want the system to panic rather than kill
+rogue processes, set this to 1.
+
+The default value is 0.
+
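As a usage note (illustrative only, not part of the patch): panic_on_oom is an ordinary /proc/sys file, so it can be read and set from a shell with echo, or programmatically as in the sketch below. The path comes straight from the text above; writing it requires root, and doing so only changes the flag, it does not trigger an out-of-memory condition.

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/vm/panic_on_oom";
	FILE *f;
	int value = -1;

	/* read the current setting */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%d", &value) != 1)
		perror(path);
	if (f)
		fclose(f);
	printf("panic_on_oom is currently %d\n", value);

	/* enable panic on OOM (requires root) */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}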
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index 0dd4ef30c36..99f89aa1016 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -26,8 +26,13 @@ a process are located. See also the numa_maps manpage in the numactl package.
Manual migration is useful if for example the scheduler has relocated
a process to a processor on a distant node. A batch scheduler or an
administrator may detect the situation and move the pages of the process
-nearer to the new processor. At some point in the future we may have
-some mechanism in the scheduler that will automatically move the pages.
+nearer to the new processor. The kernel itself only provides
+manual page migration support. Automatic page migration may be implemented
+through user space processes that move pages. A special function call
+"move_pages" allows the moving of individual pages within a process.
+A NUMA profiler may, for example, obtain a log showing frequent off-node
+accesses and may use the result to move pages to more advantageous
+locations.
Larger installations usually partition the system using cpusets into
sections of nodes. Paul Jackson has equipped cpusets with the ability to
@@ -62,22 +67,14 @@ A. In kernel use of migrate_pages()
It also prevents the swapper or other scans to encounter
the page.
-2. Generate a list of newly allocates page. These pages will contain the
- contents of the pages from the first list after page migration is
- complete.
+2. We need to have a function of type new_page_t that can be
+ passed to migrate_pages(). This function should figure out
+ how to allocate the correct new page given the old page.
3. The migrate_pages() function is called which attempts
- to do the migration. It returns the moved pages in the
- list specified as the third parameter and the failed
- migrations in the fourth parameter. The first parameter
- will contain the pages that could still be retried.
-
-4. The leftover pages of various types are returned
- to the LRU using putback_to_lru_pages() or otherwise
- disposed of. The pages will still have the refcount as
- increased by isolate_lru_pages() if putback_to_lru_pages() is not
- used! The kernel may want to handle the various cases of failures in
- different ways.
+ to do the migration. It will call the function to allocate
+ the new page for each page that is considered for
+ moving.
B. How migrate_pages() works
----------------------------
@@ -93,83 +90,58 @@ Steps:
2. Insure that writeback is complete.
-3. Make sure that the page has assigned swap cache entry if
- it is an anonyous page. The swap cache reference is necessary
- to preserve the information contain in the page table maps while
- page migration occurs.
-
-4. Prep the new page that we want to move to. It is locked
+3. Prep the new page that we want to move to. It is locked
and set to not being uptodate so that all accesses to the new
page immediately lock while the move is in progress.
-5. All the page table references to the page are either dropped (file
- backed pages) or converted to swap references (anonymous pages).
- This should decrease the reference count.
+4. The new page is prepped with some settings from the old page so that
+ accesses to the new page will discover a page with the correct settings.
+
+5. All the page table references to the page are converted
+ to migration entries or dropped (nonlinear vmas).
+   This decreases the mapcount of a page. If the resulting
+   mapcount is not zero then we do not migrate the page.
+ All user space processes that attempt to access the page
+ will now wait on the page lock.
6. The radix tree lock is taken. This will cause all processes trying
- to reestablish a pte to block on the radix tree spinlock.
+ to access the page via the mapping to block on the radix tree spinlock.
7. The refcount of the page is examined and we back out if references remain
otherwise we know that we are the only one referencing this page.
8. The radix tree is checked and if it does not contain the pointer to this
- page then we back out because someone else modified the mapping first.
-
-9. The mapping is checked. If the mapping is gone then a truncate action may
- be in progress and we back out.
-
-10. The new page is prepped with some settings from the old page so that
- accesses to the new page will be discovered to have the correct settings.
+ page then we back out because someone else modified the radix tree.
-11. The radix tree is changed to point to the new page.
+9. The radix tree is changed to point to the new page.
-12. The reference count of the old page is dropped because the radix tree
- reference is gone.
+10. The reference count of the old page is dropped because the radix tree
+    reference is gone. A reference to the new page is established because
+    the new page is referenced by the radix tree.
-13. The radix tree lock is dropped. With that lookups become possible again
- and other processes will move from spinning on the tree lock to sleeping on
- the locked new page.
+11. The radix tree lock is dropped. With that lookups in the mapping
+ become possible again. Processes will move from spinning on the tree_lock
+ to sleeping on the locked new page.
-14. The page contents are copied to the new page.
+12. The page contents are copied to the new page.
-15. The remaining page flags are copied to the new page.
+13. The remaining page flags are copied to the new page.
-16. The old page flags are cleared to indicate that the page does
- not use any information anymore.
+14. The old page flags are cleared to indicate that the page does
+ not provide any information anymore.
-17. Queued up writeback on the new page is triggered.
+15. Queued up writeback on the new page is triggered.
-18. If swap pte's were generated for the page then replace them with real
- ptes. This will reenable access for processes not blocked by the page lock.
+16. If migration entries were inserted for the page then they are replaced
+    with real ptes. Doing so enables access for user space processes not
+    already waiting for the page lock.
19. The page locks are dropped from the old and new page.
- Processes waiting on the page lock can continue.
+ Processes waiting on the page lock will redo their page faults
+ and will reach the new page.
20. The new page is moved to the LRU and can be scanned by the swapper
etc again.
-TODO list
----------
-
-- Page migration requires the use of swap handles to preserve the
- information of the anonymous page table entries. This means that swap
- space is reserved but never used. The maximum number of swap handles used
- is determined by CHUNK_SIZE (see mm/mempolicy.c) per ongoing migration.
- Reservation of pages could be avoided by having a special type of swap
- handle that does not require swap space and that would only track the page
- references. Something like that was proposed by Marcelo Tosatti in the
- past (search for migration cache on lkml or linux-mm@kvack.org).
-
-- Page migration unmaps ptes for file backed pages and requires page
- faults to reestablish these ptes. This could be optimized by somehow
- recording the references before migration and then reestablish them later.
- However, there are several locking challenges that have to be overcome
- before this is possible.
-
-- Page migration generates read ptes for anonymous pages. Dirty page
- faults are required to make the pages writable again. It may be possible
- to generate a pte marked dirty if it is known that the page is dirty and
- that this process has the only reference to that page.
-
-Christoph Lameter, March 8, 2006.
+Christoph Lameter, May 8, 2006.
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 2b245ad731e..d3848c5b0d2 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -53,10 +53,6 @@ extern void __divqu (void);
extern void __remqu (void);
EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(screen_info);
EXPORT_SYMBOL(perf_irq);
EXPORT_SYMBOL(callback_getenv);
@@ -68,19 +64,13 @@ EXPORT_SYMBOL(alpha_using_srm);
/* platform dependent support */
EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memsetw);
@@ -122,11 +112,9 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* In-kernel system calls. */
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(sys_dup);
EXPORT_SYMBOL(sys_exit);
EXPORT_SYMBOL(sys_write);
-EXPORT_SYMBOL(sys_read);
EXPORT_SYMBOL(sys_lseek);
EXPORT_SYMBOL(execve);
EXPORT_SYMBOL(sys_setsid);
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 31afe3d91ac..e15dcf4f3dc 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -244,7 +244,7 @@ do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
unsigned long bufsiz)
{
struct kstatfs linux_stat;
- int error = vfs_statfs(dentry->d_inode->i_sb, &linux_stat);
+ int error = vfs_statfs(dentry, &linux_stat);
if (!error)
error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
return error;
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 2e45e8604e3..741da0945dc 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -375,7 +375,7 @@ give_sigsegv:
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -32ul);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index bcc19fbb32d..ec20f8935e8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -52,7 +52,7 @@
*/
#define MAX_IRQ_CNT 100000
-static int noirqdebug;
+static int noirqdebug __read_mostly;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);
@@ -81,7 +81,7 @@ irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
- irq_err_count += 1;
+ irq_err_count++;
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 6178f046f12..73df32aac4c 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -84,6 +84,15 @@ static struct omap_board_config_kernel ams_delta_config[] = {
{ OMAP_TAG_UART, &ams_delta_uart_config },
};
+static struct platform_device ams_delta_led_device = {
+ .name = "ams-delta-led",
+ .id = -1
+};
+
+static struct platform_device *ams_delta_devices[] __initdata = {
+ &ams_delta_led_device,
+};
+
static void __init ams_delta_init(void)
{
iotable_init(ams_delta_io_desc, ARRAY_SIZE(ams_delta_io_desc));
@@ -94,6 +103,8 @@ static void __init ams_delta_init(void)
/* Clear latch2 (NAND, LCD, modem enable) */
ams_delta_latch2_write(~0, 0);
+
+ platform_add_devices(ams_delta_devices, ARRAY_SIZE(ams_delta_devices));
}
static void __init ams_delta_map_io(void)
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index a9b59527a74..81d94e41a18 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1474,7 +1474,7 @@ sys_call_table:
.long sys_mknodat
.long sys_fchownat
.long sys_futimesat
- .long sys_newfstatat /* 300 */
+ .long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
.long sys_linkat
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 0f273a7aca5..dee637fffda 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -26,16 +26,6 @@ extern long __memset_user(void *dst, const void *src, size_t count);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-
EXPORT_SYMBOL(ip_fast_csum);
#if 0
@@ -44,8 +34,6 @@ EXPORT_SYMBOL(local_bh_count);
#endif
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
EXPORT_SYMBOL(__page_offset);
EXPORT_SYMBOL(__memcpy_user);
@@ -62,18 +50,12 @@ EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(__debug_bug_trap);
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
-
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns);
diff --git a/arch/frv/kernel/irq-routing.c b/arch/frv/kernel/irq-routing.c
index d4776d1f4e8..b90b70a761d 100644
--- a/arch/frv/kernel/irq-routing.c
+++ b/arch/frv/kernel/irq-routing.c
@@ -112,7 +112,7 @@ struct irq_source frv_cpuuart[2] = {
#define __CPUUART(X, A) \
[X] = { \
.muxname = "uart", \
- .muxdata = (volatile void __iomem *) A, \
+ .muxdata = (volatile void __iomem *)(unsigned long)A,\
.irqmask = 1 << IRQ_CPU_UART##X, \
.doirq = frv_cpuuart_doirq, \
}
@@ -136,7 +136,7 @@ struct irq_source frv_cpudma[8] = {
#define __CPUDMA(X, A) \
[X] = { \
.muxname = "dma", \
- .muxdata = (volatile void __iomem *) A, \
+ .muxdata = (volatile void __iomem *)(unsigned long)A,\
.irqmask = 1 << IRQ_CPU_DMA##X, \
.doirq = frv_cpudma_doirq, \
}
@@ -164,7 +164,7 @@ struct irq_source frv_cputimer[3] = {
#define __CPUTIMER(X) \
[X] = { \
.muxname = "timer", \
- .muxdata = 0, \
+ .muxdata = NULL, \
.irqmask = 1 << IRQ_CPU_TIMER##X, \
.doirq = frv_cputimer_doirq, \
}
@@ -187,7 +187,7 @@ struct irq_source frv_cpuexternal[8] = {
#define __CPUEXTERNAL(X) \
[X] = { \
.muxname = "ext", \
- .muxdata = 0, \
+ .muxdata = NULL, \
.irqmask = 1 << IRQ_CPU_EXTERNAL##X, \
.doirq = frv_cpuexternal_doirq, \
}
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 11fa326a8f6..8b112b36191 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -625,7 +625,7 @@ static struct proc_dir_entry * irq_dir [NR_IRQS];
#define HEX_DIGITS 8
-static unsigned int parse_hex_value (const char *buffer,
+static unsigned int parse_hex_value (const char __user *buffer,
unsigned long count, unsigned long *ret)
{
unsigned char hexnum [HEX_DIGITS];
@@ -672,7 +672,7 @@ static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
return sprintf (page, "%08lx\n", *mask);
}
-static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
unsigned long *mask = (unsigned long *) data, full_count = count, err;
@@ -711,7 +711,7 @@ void init_irq_proc (void)
int i;
/* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", 0);
+ root_irq_dir = proc_mkdir("irq", NULL);
/* create /proc/irq/prof_cpu_mask */
entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
index f0b8fff3e73..43ce28a13a5 100644
--- a/arch/frv/kernel/pm.c
+++ b/arch/frv/kernel/pm.c
@@ -137,7 +137,7 @@ unsigned long sleep_phys_sp(void *sp)
#define CTL_PM_P0 4
#define CTL_PM_CM 5
-static int user_atoi(char *ubuf, size_t len)
+static int user_atoi(char __user *ubuf, size_t len)
{
char buf[16];
unsigned long ret;
@@ -159,7 +159,7 @@ static int user_atoi(char *ubuf, size_t len)
* Send us to sleep.
*/
static int sysctl_pm_do_suspend(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int retval, mode;
@@ -215,7 +215,7 @@ static int try_set_cmode(int new_cmode)
static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_cmode;
@@ -227,9 +227,9 @@ static int cmode_procctl(ctl_table *ctl, int write, struct file *filp,
return try_set_cmode(new_cmode)?:*lenp;
}
-static int cmode_sysctl(ctl_table *table, int *name, int nlen,
- void *oldval, size_t *oldlenp,
- void *newval, size_t newlen, void **context)
+static int cmode_sysctl(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context)
{
if (oldval && oldlenp) {
size_t oldlen;
@@ -240,7 +240,7 @@ static int cmode_sysctl(ctl_table *table, int *name, int nlen,
if (oldlen != sizeof(int))
return -EINVAL;
- if (put_user(clock_cmode_current, (unsigned int *)oldval) ||
+ if (put_user(clock_cmode_current, (unsigned __user *)oldval) ||
put_user(sizeof(int), oldlenp))
return -EFAULT;
}
@@ -250,7 +250,7 @@ static int cmode_sysctl(ctl_table *table, int *name, int nlen,
if (newlen != sizeof(int))
return -EINVAL;
- if (get_user(new_cmode, (int *)newval))
+ if (get_user(new_cmode, (int __user *)newval))
return -EFAULT;
return try_set_cmode(new_cmode)?:1;
@@ -318,7 +318,7 @@ static int try_set_cm(int new_cm)
}
static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_p0;
@@ -330,9 +330,9 @@ static int p0_procctl(ctl_table *ctl, int write, struct file *filp,
return try_set_p0(new_p0)?:*lenp;
}
-static int p0_sysctl(ctl_table *table, int *name, int nlen,
- void *oldval, size_t *oldlenp,
- void *newval, size_t newlen, void **context)
+static int p0_sysctl(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context)
{
if (oldval && oldlenp) {
size_t oldlen;
@@ -343,7 +343,7 @@ static int p0_sysctl(ctl_table *table, int *name, int nlen,
if (oldlen != sizeof(int))
return -EINVAL;
- if (put_user(clock_p0_current, (unsigned int *)oldval) ||
+ if (put_user(clock_p0_current, (unsigned __user *)oldval) ||
put_user(sizeof(int), oldlenp))
return -EFAULT;
}
@@ -353,7 +353,7 @@ static int p0_sysctl(ctl_table *table, int *name, int nlen,
if (newlen != sizeof(int))
return -EINVAL;
- if (get_user(new_p0, (int *)newval))
+ if (get_user(new_p0, (int __user *)newval))
return -EFAULT;
return try_set_p0(new_p0)?:1;
@@ -362,7 +362,7 @@ static int p0_sysctl(ctl_table *table, int *name, int nlen,
}
static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *fpos)
+ void __user *buffer, size_t *lenp, loff_t *fpos)
{
int new_cm;
@@ -374,9 +374,9 @@ static int cm_procctl(ctl_table *ctl, int write, struct file *filp,
return try_set_cm(new_cm)?:*lenp;
}
-static int cm_sysctl(ctl_table *table, int *name, int nlen,
- void *oldval, size_t *oldlenp,
- void *newval, size_t newlen, void **context)
+static int cm_sysctl(ctl_table *table, int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen, void **context)
{
if (oldval && oldlenp) {
size_t oldlen;
@@ -387,7 +387,7 @@ static int cm_sysctl(ctl_table *table, int *name, int nlen,
if (oldlen != sizeof(int))
return -EINVAL;
- if (put_user(clock_cm_current, (unsigned int *)oldval) ||
+ if (put_user(clock_cm_current, (unsigned __user *)oldval) ||
put_user(sizeof(int), oldlenp))
return -EFAULT;
}
@@ -397,7 +397,7 @@ static int cm_sysctl(ctl_table *table, int *name, int nlen,
if (newlen != sizeof(int))
return -EINVAL;
- if (get_user(new_cm, (int *)newval))
+ if (get_user(new_cm, (int __user *)newval))
return -EFAULT;
return try_set_cm(new_cm)?:1;
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 0fff8a61ef2..489e6c489cb 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -246,7 +246,7 @@ int copy_thread(int nr, unsigned long clone_flags,
/*
* sys_execve() executes a new program.
*/
-asmlinkage int sys_execve(char *name, char **argv, char **envp)
+asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
int error;
char * filename;
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 5908deae960..1f7d65f29e7 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -814,7 +814,7 @@ void __init setup_arch(char **cmdline_p)
* - by now the stack is part of the init task */
printk("Memory %08lx-%08lx\n", memory_start, memory_end);
- if (memory_start == memory_end) BUG();
+ BUG_ON(memory_start == memory_end);
init_mm.start_code = (unsigned long) &_stext;
init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 679c1d5cc95..b8a5882b862 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -98,7 +98,7 @@ int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
struct sigframe
{
- void (*pretcode)(void);
+ __sigrestore_t pretcode;
int sig;
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
@@ -107,10 +107,10 @@ struct sigframe
struct rt_sigframe
{
- void (*pretcode)(void);
+ __sigrestore_t pretcode;
int sig;
- struct siginfo *pinfo;
- void *puc;
+ struct siginfo __user *pinfo;
+ void __user *puc;
struct siginfo info;
struct ucontext uc;
uint32_t retcode[2];
@@ -233,7 +233,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (! on_sig_stack(sp))
+ if (! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
}
@@ -284,7 +284,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
* setlos #__NR_sigreturn,gr7
* tira gr0,0
*/
- if (__put_user((void (*)(void))frame->retcode, &frame->pretcode) ||
+ if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) ||
__put_user(0x8efc0000|__NR_sigreturn, &frame->retcode[0]) ||
__put_user(0xc0700000, &frame->retcode[1]))
goto give_sigsegv;
@@ -300,7 +300,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
if (get_personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
- (struct fdpic_func_descriptor *) ka->sa.sa_handler;
+ (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
__get_user(__frame->pc, &funcptr->text);
__get_user(__frame->gr15, &funcptr->GOT);
} else {
@@ -359,8 +359,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
if (__put_user(0, &frame->uc.uc_flags) ||
- __put_user(0, &frame->uc.uc_link) ||
- __put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
+ __put_user(NULL, &frame->uc.uc_link) ||
+ __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
__put_user(sas_ss_flags(__frame->sp), &frame->uc.uc_stack.ss_flags) ||
__put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size))
goto give_sigsegv;
@@ -382,7 +382,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* setlos #__NR_sigreturn,gr7
* tira gr0,0
*/
- if (__put_user((void (*)(void))frame->retcode, &frame->pretcode) ||
+ if (__put_user((__sigrestore_t)frame->retcode, &frame->pretcode) ||
__put_user(0x8efc0000|__NR_rt_sigreturn, &frame->retcode[0]) ||
__put_user(0xc0700000, &frame->retcode[1]))
goto give_sigsegv;
@@ -398,7 +398,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
__frame->gr9 = (unsigned long) &frame->info;
if (get_personality & FDPIC_FUNCPTRS) {
- struct fdpic_func_descriptor *funcptr =
+ struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
__get_user(__frame->pc, &funcptr->text);
__get_user(__frame->gr15, &funcptr->GOT);
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index 931aa6d895e..c4d4348c9e8 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -32,7 +32,7 @@
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
-asmlinkage long sys_pipe(unsigned long * fildes)
+asmlinkage long sys_pipe(unsigned long __user * fildes)
{
int fd[2];
int error;
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
index 408b0f382b4..b908863d659 100644
--- a/arch/frv/kernel/sysctl.c
+++ b/arch/frv/kernel/sysctl.c
@@ -49,7 +49,7 @@ static void frv_change_dcache_mode(unsigned long newmode)
* handle requests to dynamically switch the write caching mode delivered by /proc
*/
static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
unsigned long hsr0;
char buff[8];
@@ -123,7 +123,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
*/
#ifdef CONFIG_MMU
static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
pid_t pid;
char buff[16], *p;
diff --git a/arch/frv/kernel/uaccess.c b/arch/frv/kernel/uaccess.c
index 9b751c0f0e8..9fb771a20df 100644
--- a/arch/frv/kernel/uaccess.c
+++ b/arch/frv/kernel/uaccess.c
@@ -17,7 +17,7 @@
/*
* copy a null terminated string from userspace
*/
-long strncpy_from_user(char *dst, const char *src, long count)
+long strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max;
char *p, ch;
@@ -70,9 +70,9 @@ EXPORT_SYMBOL(strncpy_from_user);
*
* Return 0 on exception, a value greater than N if too long
*/
-long strnlen_user(const char *src, long count)
+long strnlen_user(const char __user *src, long count)
{
- const char *p;
+ const char __user *p;
long err = 0;
char ch;
diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c
index c4a1144c98b..45ae39d84b6 100644
--- a/arch/frv/mb93090-mb00/pci-irq.c
+++ b/arch/frv/mb93090-mb00/pci-irq.c
@@ -32,11 +32,11 @@
*/
static const uint8_t __initdata pci_bus0_irq_routing[32][4] = {
- [0 ] { IRQ_FPGA_MB86943_PCI_INTA },
- [16] { IRQ_FPGA_RTL8029_INTA },
- [17] { IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB },
- [18] { IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA },
- [19] { IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD },
+ [0 ] = { IRQ_FPGA_MB86943_PCI_INTA },
+ [16] = { IRQ_FPGA_RTL8029_INTA },
+ [17] = { IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB },
+ [18] = { IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD, IRQ_FPGA_PCI_INTA },
+ [19] = { IRQ_FPGA_PCI_INTA, IRQ_FPGA_PCI_INTB, IRQ_FPGA_PCI_INTC, IRQ_FPGA_PCI_INTD },
};
void __init pcibios_irq_init(void)
diff --git a/arch/frv/mm/kmap.c b/arch/frv/mm/kmap.c
index c54f18e65ea..40b62c5c295 100644
--- a/arch/frv/mm/kmap.c
+++ b/arch/frv/mm/kmap.c
@@ -31,15 +31,15 @@
* Map some physical address range into the kernel address space.
*/
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
- return (void *)physaddr;
+ return (void __iomem *)physaddr;
}
/*
* Unmap a ioremap()ed region again
*/
-void iounmap(void *addr)
+void iounmap(void volatile __iomem *addr)
{
}
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index f13d5e82d4b..7787f70a05b 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -307,7 +307,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(usp))
+ if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void *)((usp - frame_size) & -8UL);
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8dfa3054f10..1596101cfaf 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -173,6 +173,12 @@ config ACPI_SRAT
bool
default y
depends on NUMA && (X86_SUMMIT || X86_GENERICARCH)
+ select ACPI_NUMA
+
+config HAVE_ARCH_PARSE_SRAT
+ bool
+ default y
+ depends on ACPI_SRAT
config X86_SUMMIT_NUMA
bool
@@ -224,7 +230,6 @@ config NR_CPUS
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SMP
- default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with Intel Pentium 4 chips with HyperThreading at a
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index fbe93084244..97ca17189af 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -217,7 +217,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_madt *madt = NULL;
- if (!phys_addr || !size)
+ if (!phys_addr || !size || !cpu_has_apic)
return -EINVAL;
madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -623,9 +623,9 @@ extern u32 pmtmr_ioport;
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
{
- struct fadt_descriptor_rev2 *fadt = NULL;
+ struct fadt_descriptor *fadt = NULL;
- fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
+ fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
@@ -756,7 +756,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
return -ENODEV;
}
- if (!cpu_has_apic)
+ if (!cpu_has_apic)
return -ENODEV;
/*
diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c
index 9f4cc02717e..b54fded4983 100644
--- a/arch/i386/kernel/acpi/processor.c
+++ b/arch/i386/kernel/acpi/processor.c
@@ -47,7 +47,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
- buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
diff --git a/arch/i386/kernel/acpi/sleep.c b/arch/i386/kernel/acpi/sleep.c
index 1cb2b186a3a..4ee83577bf6 100644
--- a/arch/i386/kernel/acpi/sleep.c
+++ b/arch/i386/kernel/acpi/sleep.c
@@ -8,30 +8,17 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/dmi.h>
+#include <linux/cpumask.h>
+
#include <asm/smp.h>
-#include <asm/tlbflush.h>
/* address in low memory of the wakeup routine. */
unsigned long acpi_wakeup_address = 0;
unsigned long acpi_video_flags;
extern char wakeup_start, wakeup_end;
-extern void zap_low_mappings(void);
-
extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
-static void init_low_mapping(pgd_t * pgd, int pgd_limit)
-{
- int pgd_ofs = 0;
-
- while ((pgd_ofs < pgd_limit)
- && (pgd_ofs + USER_PTRS_PER_PGD < PTRS_PER_PGD)) {
- set_pgd(pgd, *(pgd + USER_PTRS_PER_PGD));
- pgd_ofs++, pgd++;
- }
- flush_tlb_all();
-}
-
/**
* acpi_save_state_mem - save kernel state
*
@@ -42,7 +29,6 @@ int acpi_save_state_mem(void)
{
if (!acpi_wakeup_address)
return 1;
- init_low_mapping(swapper_pg_dir, USER_PTRS_PER_PGD);
memcpy((void *)acpi_wakeup_address, &wakeup_start,
&wakeup_end - &wakeup_start);
acpi_copy_wakeup_routine(acpi_wakeup_address);
@@ -55,7 +41,6 @@ int acpi_save_state_mem(void)
*/
void acpi_restore_state_mem(void)
{
- zap_low_mappings();
}
/**
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index 7c74fe0dc93..9f408eee4e6 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -56,7 +56,7 @@ wakeup_code:
1:
# set up page table
- movl $swapper_pg_dir-__PAGE_OFFSET, %eax
+ movl $swsusp_pg_dir-__PAGE_OFFSET, %eax
movl %eax, %cr3
testl $1, real_efer_save_restore - wakeup_code
@@ -265,11 +265,6 @@ ENTRY(acpi_copy_wakeup_routine)
movl $0x12345678, saved_magic
ret
-.data
-ALIGN
-ENTRY(saved_magic) .long 0
-ENTRY(saved_eip) .long 0
-
save_registers:
leal 4(%esp), %eax
movl %eax, saved_context_esp
@@ -304,7 +299,11 @@ ret_point:
call restore_processor_state
ret
+.data
ALIGN
+ENTRY(saved_magic) .long 0
+ENTRY(saved_eip) .long 0
+
# saved registers
saved_gdt: .long 0,0
saved_idt: .long 0,0
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 3d4b2f3d116..5ab59c12335 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -62,7 +62,7 @@ int apic_verbosity;
static void apic_pm_activate(void);
-int modern_apic(void)
+static int modern_apic(void)
{
unsigned int lvr, version;
/* AMD systems use old APIC versions, so check the CPU */
@@ -113,7 +113,7 @@ void __init apic_intr_init(void)
}
/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer = 0;
+int using_apic_timer __read_mostly = 0;
static int enabled_via_apicbase;
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index df0e1745f18..9e819eb6822 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -374,14 +374,14 @@ static struct {
unsigned short segment;
} apm_bios_entry;
static int clock_slowed;
-static int idle_threshold = DEFAULT_IDLE_THRESHOLD;
-static int idle_period = DEFAULT_IDLE_PERIOD;
+static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
+static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
static int set_pm_idle;
static int suspends_pending;
static int standbys_pending;
static int ignore_sys_suspend;
static int ignore_normal_resume;
-static int bounce_interval = DEFAULT_BOUNCE_INTERVAL;
+static int bounce_interval __read_mostly = DEFAULT_BOUNCE_INTERVAL;
#ifdef CONFIG_APM_RTC_IS_GMT
# define clock_cmos_diff 0
@@ -390,8 +390,8 @@ static int bounce_interval = DEFAULT_BOUNCE_INTERVAL;
static long clock_cmos_diff;
static int got_clock_diff;
#endif
-static int debug;
-static int smp;
+static int debug __read_mostly;
+static int smp __read_mostly;
static int apm_disabled = -1;
#ifdef CONFIG_SMP
static int power_off;
@@ -403,8 +403,8 @@ static int realmode_power_off = 1;
#else
static int realmode_power_off;
#endif
-static int exit_kapmd;
-static int kapmd_running;
+static int exit_kapmd __read_mostly;
+static int kapmd_running __read_mostly;
#ifdef CONFIG_APM_ALLOW_INTS
static int allow_ints = 1;
#else
@@ -416,15 +416,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
static struct apm_user * user_list;
static DEFINE_SPINLOCK(user_list_lock);
-static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
+static const struct desc_struct bad_bios_desc = { 0, 0x00409200 };
-static char driver_version[] = "1.16ac"; /* no spaces */
+static const char driver_version[] = "1.16ac"; /* no spaces */
/*
* APM event names taken from the APM 1.2 specification. These are
* the message codes that the BIOS uses to tell us about events
*/
-static char * apm_event_name[] = {
+static const char * const apm_event_name[] = {
"system standby",
"system suspend",
"normal resume",
@@ -616,7 +616,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
* @ecx_in: ECX register value for BIOS call
* @eax: EAX register on return from the BIOS call
*
- * Make a BIOS call that does only returns one value, or just status.
+ * Make a BIOS call that returns one value only, or just status.
* If there is an error, then the error code is returned in AH
* (bits 8-15 of eax) and this function returns non-zero. This is
* used for simpler BIOS operations. This call may hold interrupts
@@ -822,7 +822,7 @@ static void apm_do_busy(void)
#define IDLE_CALC_LIMIT (HZ * 100)
#define IDLE_LEAKY_MAX 16
-static void (*original_pm_idle)(void);
+static void (*original_pm_idle)(void) __read_mostly;
/**
* apm_cpu_idle - cpu idling for APM capable Linux
@@ -1063,7 +1063,8 @@ static int apm_engage_power_management(u_short device, int enable)
static int apm_console_blank(int blank)
{
- int error, i;
+ int error = APM_NOT_ENGAGED; /* silence gcc */
+ int i;
u_short state;
static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };
@@ -1104,7 +1105,8 @@ static int queue_empty(struct apm_user *as)
static apm_event_t get_queued_event(struct apm_user *as)
{
- as->event_tail = (as->event_tail + 1) % APM_MAX_EVENTS;
+ if (++as->event_tail >= APM_MAX_EVENTS)
+ as->event_tail = 0;
return as->events[as->event_tail];
}
@@ -1118,13 +1120,16 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
for (as = user_list; as != NULL; as = as->next) {
if ((as == sender) || (!as->reader))
continue;
- as->event_head = (as->event_head + 1) % APM_MAX_EVENTS;
+ if (++as->event_head >= APM_MAX_EVENTS)
+ as->event_head = 0;
+
if (as->event_head == as->event_tail) {
static int notified;
if (notified++ == 0)
printk(KERN_ERR "apm: an event queue overflowed\n");
- as->event_tail = (as->event_tail + 1) % APM_MAX_EVENTS;
+ if (++as->event_tail >= APM_MAX_EVENTS)
+ as->event_tail = 0;
}
as->events[as->event_head] = event;
if ((!as->suser) || (!as->writer))
@@ -1282,7 +1287,7 @@ static void standby(void)
static apm_event_t get_event(void)
{
int error;
- apm_event_t event;
+ apm_event_t event = APM_NO_EVENTS; /* silence gcc */
apm_eventinfo_t info;
static int notified;
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index a06a49075f1..44f2c5f2dda 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -11,6 +11,8 @@
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
+#include <asm/mtrr.h>
+#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 1a7bdcef192..05668e3598c 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
- struct acpi_processor_performance acpi_data;
+ struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -104,64 +105,43 @@ acpi_processor_set_performance (
{
u16 port = 0;
u8 bit_width = 0;
- int ret;
- u32 value = 0;
int i = 0;
- struct cpufreq_freqs cpufreq_freqs;
- cpumask_t saved_mask;
+ int ret = 0;
+ u32 value = 0;
int retval;
+ struct acpi_processor_performance *perf;
dprintk("acpi_processor_set_performance\n");
- /*
- * TBD: Use something other than set_cpus_allowed.
- * As set_cpus_allowed is a bit racy,
- * with any other set_cpus_allowed for this process.
- */
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- if (smp_processor_id() != cpu) {
- return (-EAGAIN);
- }
-
- if (state == data->acpi_data.state) {
+ retval = 0;
+ perf = data->acpi_data;
+ if (state == perf->state) {
if (unlikely(data->resume)) {
dprintk("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
dprintk("Already at target state (P%d)\n", state);
- retval = 0;
- goto migrate_end;
+ return (retval);
}
}
- dprintk("Transitioning from P%d to P%d\n",
- data->acpi_data.state, state);
-
- /* cpufreq frequency struct */
- cpufreq_freqs.cpu = cpu;
- cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
- cpufreq_freqs.new = data->freq_table[state].frequency;
-
- /* notify cpufreq */
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+ dprintk("Transitioning from P%d to P%d\n", perf->state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
- port = data->acpi_data.control_register.address;
- bit_width = data->acpi_data.control_register.bit_width;
- value = (u32) data->acpi_data.states[state].control;
+ port = perf->control_register.address;
+ bit_width = perf->control_register.bit_width;
+ value = (u32) perf->states[state].control;
dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
ret = acpi_processor_write_port(port, bit_width, value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
- retval = ret;
- goto migrate_end;
+ return (ret);
}
/*
@@ -177,48 +157,35 @@ acpi_processor_set_performance (
* before giving up.
*/
- port = data->acpi_data.status_register.address;
- bit_width = data->acpi_data.status_register.bit_width;
+ port = perf->status_register.address;
+ bit_width = perf->status_register.bit_width;
dprintk("Looking for 0x%08x from port 0x%04x\n",
- (u32) data->acpi_data.states[state].status, port);
+ (u32) perf->states[state].status, port);
- for (i=0; i<100; i++) {
+ for (i = 0; i < 100; i++) {
ret = acpi_processor_read_port(port, bit_width, &value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
- retval = ret;
- goto migrate_end;
+ return (ret);
}
- if (value == (u32) data->acpi_data.states[state].status)
+ if (value == (u32) perf->states[state].status)
break;
udelay(10);
}
} else {
- value = (u32) data->acpi_data.states[state].status;
+ value = (u32) perf->states[state].status;
}
- /* notify cpufreq */
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
- if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
- unsigned int tmp = cpufreq_freqs.new;
- cpufreq_freqs.new = cpufreq_freqs.old;
- cpufreq_freqs.old = tmp;
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
- cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+ if (unlikely(value != (u32) perf->states[state].status)) {
printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
retval = -ENODEV;
- goto migrate_end;
+ return (retval);
}
dprintk("Transition successful after %d microseconds\n", i * 10);
- data->acpi_data.state = state;
-
- retval = 0;
-migrate_end:
- set_cpus_allowed(current, saved_mask);
+ perf->state = state;
return (retval);
}
@@ -230,8 +197,17 @@ acpi_cpufreq_target (
unsigned int relation)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+ struct acpi_processor_performance *perf;
+ struct cpufreq_freqs freqs;
+ cpumask_t online_policy_cpus;
+ cpumask_t saved_mask;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+ unsigned int cur_state = 0;
unsigned int next_state = 0;
unsigned int result = 0;
+ unsigned int j;
+ unsigned int tmp;
dprintk("acpi_cpufreq_setpolicy\n");
@@ -240,11 +216,95 @@ acpi_cpufreq_target (
target_freq,
relation,
&next_state);
- if (result)
+ if (unlikely(result))
return (result);
- result = acpi_processor_set_performance (data, policy->cpu, next_state);
+ perf = data->acpi_data;
+ cur_state = perf->state;
+ freqs.old = data->freq_table[cur_state].frequency;
+ freqs.new = data->freq_table[next_state].frequency;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+ online_policy_cpus = policy->cpus;
+#endif
+
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ /*
+ * We need to call driver->target() on all or any CPU in
+ * policy->cpus, depending on policy->shared_type.
+ */
+ saved_mask = current->cpus_allowed;
+ cpus_clear(covered_cpus);
+ for_each_cpu_mask(j, online_policy_cpus) {
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on CPU that wants to change freq
+ */
+ cpus_clear(set_mask);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ cpus_or(set_mask, set_mask, online_policy_cpus);
+ else
+ cpu_set(j, set_mask);
+
+ set_cpus_allowed(current, set_mask);
+ if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ dprintk("couldn't limit to CPUs in this domain\n");
+ result = -EAGAIN;
+ break;
+ }
+
+ result = acpi_processor_set_performance (data, j, next_state);
+ if (result) {
+ result = -EAGAIN;
+ break;
+ }
+
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
+
+ cpu_set(j, covered_cpus);
+ }
+
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ if (unlikely(result)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to online_policy_cpus and
+ * acpi_processor_set_performance() has been called on
+	 * covered_cpus. Best effort undo..
+ */
+
+ if (!cpus_empty(covered_cpus)) {
+ for_each_cpu_mask(j, covered_cpus) {
+ policy->cpu = j;
+ acpi_processor_set_performance (data,
+ j,
+ cur_state);
+ }
+ }
+
+ tmp = freqs.new;
+ freqs.new = freqs.old;
+ freqs.old = tmp;
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ }
+
+ set_cpus_allowed(current, saved_mask);
return (result);
}
@@ -270,30 +330,65 @@ acpi_cpufreq_guess_freq (
struct cpufreq_acpi_io *data,
unsigned int cpu)
{
+ struct acpi_processor_performance *perf = data->acpi_data;
+
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
- unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
+ unsigned long freqn = perf->states[0].core_frequency * 1000;
- for (i=0; i < (data->acpi_data.state_count - 1); i++) {
+ for (i = 0; i < (perf->state_count - 1); i++) {
freq = freqn;
- freqn = data->acpi_data.states[i+1].core_frequency * 1000;
+ freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
- data->acpi_data.state = i;
+ perf->state = i;
return (freq);
}
}
- data->acpi_data.state = data->acpi_data.state_count - 1;
+ perf->state = perf->state_count - 1;
return (freqn);
- } else
+ } else {
/* assume CPU is at P0... */
- data->acpi_data.state = 0;
- return data->acpi_data.states[0].core_frequency * 1000;
-
+ perf->state = 0;
+ return perf->states[0].core_frequency * 1000;
+ }
}
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int acpi_cpufreq_early_init_acpi(void)
+{
+ struct acpi_processor_performance *data;
+ unsigned int i, j;
+
+ dprintk("acpi_cpufreq_early_init\n");
+
+ for_each_cpu(i) {
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!data) {
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+ return (-ENOMEM);
+ }
+ acpi_perf_data[i] = data;
+ }
+
+ /* Do initialization in ACPI core */
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
+
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -303,41 +398,51 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+ struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
+ if (!acpi_perf_data[cpu])
+ return (-ENODEV);
+
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
+ data->acpi_data = acpi_perf_data[cpu];
acpi_io_data[cpu] = data;
- result = acpi_processor_register_performance(&data->acpi_data, cpu);
+ result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
+ perf = data->acpi_data;
+ policy->cpus = perf->shared_cpu_map;
+ policy->shared_type = perf->shared_type;
+
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
/* capability check */
- if (data->acpi_data.state_count <= 1) {
+ if (perf->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
- (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+
+ if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
+ (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
dprintk("Unsupported address space [%d, %d]\n",
- (u32) (data->acpi_data.control_register.space_id),
- (u32) (data->acpi_data.status_register.space_id));
+ (u32) (perf->control_register.space_id),
+ (u32) (perf->status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
- data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
+ data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -345,9 +450,9 @@ acpi_cpufreq_cpu_init (
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
- for (i=0; i<data->acpi_data.state_count; i++) {
- if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
- policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
+ for (i=0; i<perf->state_count; i++) {
+ if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@@ -355,11 +460,11 @@ acpi_cpufreq_cpu_init (
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
/* table init */
- for (i=0; i<=data->acpi_data.state_count; i++)
+ for (i=0; i<=perf->state_count; i++)
{
data->freq_table[i].index = i;
- if (i<data->acpi_data.state_count)
- data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+ if (i<perf->state_count)
+ data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
else
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
@@ -374,12 +479,12 @@ acpi_cpufreq_cpu_init (
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
cpu);
- for (i = 0; i < data->acpi_data.state_count; i++)
+ for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
- (i == data->acpi_data.state?'*':' '), i,
- (u32) data->acpi_data.states[i].core_frequency,
- (u32) data->acpi_data.states[i].power,
- (u32) data->acpi_data.states[i].transition_latency);
+ (i == perf->state?'*':' '), i,
+ (u32) perf->states[i].core_frequency,
+ (u32) perf->states[i].power,
+ (u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
@@ -394,7 +499,7 @@ acpi_cpufreq_cpu_init (
err_freqfree:
kfree(data->freq_table);
err_unreg:
- acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
@@ -415,7 +520,7 @@ acpi_cpufreq_cpu_exit (
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
- acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
+ acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
kfree(data);
}
@@ -462,7 +567,10 @@ acpi_cpufreq_init (void)
dprintk("acpi_cpufreq_init\n");
- result = cpufreq_register_driver(&acpi_cpufreq_driver);
+ result = acpi_cpufreq_early_init_acpi();
+
+ if (!result)
+ result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
}
@@ -471,10 +579,15 @@ acpi_cpufreq_init (void)
static void __exit
acpi_cpufreq_exit (void)
{
+ unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
+ for_each_cpu(i) {
+ kfree(acpi_perf_data[i]);
+ acpi_perf_data[i] = NULL;
+ }
return;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index f1a82c5de1b..31c3a5baaa7 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -347,7 +347,36 @@ static unsigned int get_cur_freq(unsigned int cpu)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-static struct acpi_processor_performance p;
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+
+/*
+ * centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
+ * library
+ *
+ * Before doing the actual init, we need to do _PSD related setup whenever
+ * supported by the BIOS. These are handled by this early_init routine.
+ */
+static int centrino_cpu_early_init_acpi(void)
+{
+ unsigned int i, j;
+ struct acpi_processor_performance *data;
+
+ for_each_cpu(i) {
+ data = kzalloc(sizeof(struct acpi_processor_performance),
+ GFP_KERNEL);
+ if (!data) {
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+ return (-ENOMEM);
+ }
+ acpi_perf_data[i] = data;
+ }
+
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
@@ -361,46 +390,51 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
+ struct acpi_processor_performance *p;
+
+ p = acpi_perf_data[cpu];
/* register with ACPI core */
- if (acpi_processor_register_performance(&p, cpu)) {
- dprintk("obtaining ACPI data failed\n");
+ if (acpi_processor_register_performance(p, cpu)) {
+ dprintk(PFX "obtaining ACPI data failed\n");
return -EIO;
}
+ policy->cpus = p->shared_cpu_map;
+ policy->shared_type = p->shared_type;
/* verify the acpi_data */
- if (p.state_count <= 1) {
+ if (p->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
- if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
- (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
- p.control_register.space_id, p.status_register.space_id);
+ p->control_register.space_id, p->status_register.space_id);
result = -EIO;
goto err_unreg;
}
- for (i=0; i<p.state_count; i++) {
- if (p.states[i].control != p.states[i].status) {
+ for (i=0; i<p->state_count; i++) {
+ if (p->states[i].control != p->states[i].status) {
dprintk("Different control (%llu) and status values (%llu)\n",
- p.states[i].control, p.states[i].status);
+ p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
}
- if (!p.states[i].core_frequency) {
+ if (!p->states[i].core_frequency) {
dprintk("Zero core frequency for state %u\n", i);
result = -EINVAL;
goto err_unreg;
}
- if (p.states[i].core_frequency > p.states[0].core_frequency) {
+ if (p->states[i].core_frequency > p->states[0].core_frequency) {
dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
- p.states[i].core_frequency, p.states[0].core_frequency);
- p.states[i].core_frequency = 0;
+ p->states[i].core_frequency, p->states[0].core_frequency);
+ p->states[i].core_frequency = 0;
continue;
}
}
@@ -412,26 +446,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
centrino_model[cpu]->model_name=NULL;
- centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
+ centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
- (p.state_count + 1), GFP_KERNEL);
+ (p->state_count + 1), GFP_KERNEL);
if (!centrino_model[cpu]->op_points) {
result = -ENOMEM;
goto err_kfree;
}
- for (i=0; i<p.state_count; i++) {
- centrino_model[cpu]->op_points[i].index = p.states[i].control;
- centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
+ for (i=0; i<p->state_count; i++) {
+ centrino_model[cpu]->op_points[i].index = p->states[i].control;
+ centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
}
- centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
+ centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
cur_freq = get_cur_freq(cpu);
- for (i=0; i<p.state_count; i++) {
- if (!p.states[i].core_frequency) {
+ for (i=0; i<p->state_count; i++) {
+ if (!p->states[i].core_frequency) {
dprintk("skipping state %u\n", i);
centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
@@ -447,7 +481,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
- p.state = i;
+ p->state = i;
}
/* notify BIOS that we exist */
@@ -460,12 +494,13 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
err_kfree:
kfree(centrino_model[cpu]);
err_unreg:
- acpi_processor_unregister_performance(&p, cpu);
- dprintk("invalid ACPI data\n");
+ acpi_processor_unregister_performance(p, cpu);
+ dprintk(PFX "invalid ACPI data\n");
return (result);
}
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif
static int centrino_cpu_init(struct cpufreq_policy *policy)
@@ -551,10 +586,15 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
if (!centrino_model[cpu]->model_name) {
- dprintk("unregistering and freeing ACPI data\n");
- acpi_processor_unregister_performance(&p, cpu);
- kfree(centrino_model[cpu]->op_points);
- kfree(centrino_model[cpu]);
+ static struct acpi_processor_performance *p;
+
+ if (acpi_perf_data[cpu]) {
+ p = acpi_perf_data[cpu];
+ dprintk("unregistering and freeing ACPI data\n");
+ acpi_processor_unregister_performance(p, cpu);
+ kfree(centrino_model[cpu]->op_points);
+ kfree(centrino_model[cpu]);
+ }
}
#endif
@@ -588,63 +628,128 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int newstate = 0;
- unsigned int msr, oldmsr, h, cpu = policy->cpu;
+ unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
+ cpumask_t online_policy_cpus;
cpumask_t saved_mask;
- int retval;
+ cpumask_t set_mask;
+ cpumask_t covered_cpus;
+ int retval = 0;
+ unsigned int j, k, first_cpu, tmp;
- if (centrino_model[cpu] == NULL)
+ if (unlikely(centrino_model[cpu] == NULL))
return -ENODEV;
- /*
- * Support for SMP systems.
- * Make sure we are running on the CPU that wants to change frequency
- */
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, policy->cpus);
- if (!cpu_isset(smp_processor_id(), policy->cpus)) {
- dprintk("couldn't limit to CPUs in this domain\n");
- return(-EAGAIN);
+ if (unlikely(cpufreq_frequency_table_target(policy,
+ centrino_model[cpu]->op_points,
+ target_freq,
+ relation,
+ &newstate))) {
+ return -EINVAL;
}
- if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
- relation, &newstate)) {
- retval = -EINVAL;
- goto migrate_end;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+ online_policy_cpus = policy->cpus;
+#endif
- msr = centrino_model[cpu]->op_points[newstate].index;
- rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ saved_mask = current->cpus_allowed;
+ first_cpu = 1;
+ cpus_clear(covered_cpus);
+ for_each_cpu_mask(j, online_policy_cpus) {
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on CPU that wants to change freq
+ */
+ cpus_clear(set_mask);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ cpus_or(set_mask, set_mask, online_policy_cpus);
+ else
+ cpu_set(j, set_mask);
+
+ set_cpus_allowed(current, set_mask);
+ if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+ dprintk("couldn't limit to CPUs in this domain\n");
+ retval = -EAGAIN;
+ if (first_cpu) {
+ /* We haven't started the transition yet. */
+ goto migrate_end;
+ }
+ break;
+ }
- if (msr == (oldmsr & 0xffff)) {
- retval = 0;
- dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
- goto migrate_end;
- }
+ msr = centrino_model[cpu]->op_points[newstate].index;
+
+ if (first_cpu) {
+ rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (msr == (oldmsr & 0xffff)) {
+ dprintk("no change needed - msr was and needs "
+ "to be %x\n", oldmsr);
+ retval = 0;
+ goto migrate_end;
+ }
+
+ freqs.old = extract_clock(oldmsr, cpu, 0);
+ freqs.new = extract_clock(msr, cpu, 0);
+
+ dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
+ target_freq, freqs.old, freqs.new, msr);
+
+ for_each_cpu_mask(k, online_policy_cpus) {
+ freqs.cpu = k;
+ cpufreq_notify_transition(&freqs,
+ CPUFREQ_PRECHANGE);
+ }
+
+ first_cpu = 0;
+ /* all but 16 LSB are reserved, treat them with care */
+ oldmsr &= ~0xffff;
+ msr &= 0xffff;
+ oldmsr |= msr;
+ }
- freqs.cpu = cpu;
- freqs.old = extract_clock(oldmsr, cpu, 0);
- freqs.new = extract_clock(msr, cpu, 0);
+ wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
- dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
- target_freq, freqs.old, freqs.new, msr);
+ cpu_set(j, covered_cpus);
+ }
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ for_each_cpu_mask(k, online_policy_cpus) {
+ freqs.cpu = k;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
- /* all but 16 LSB are "reserved", so treat them with
- care */
- oldmsr &= ~0xffff;
- msr &= 0xffff;
- oldmsr |= msr;
+ if (unlikely(retval)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to policy->cpus and
+ * MSRs have already been written on covered_cpus.
+ * Best effort undo.
+ */
- wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ if (!cpus_empty(covered_cpus)) {
+ for_each_cpu_mask(j, covered_cpus) {
+ set_cpus_allowed(current, cpumask_of_cpu(j));
+ wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+ }
+ }
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ tmp = freqs.new;
+ freqs.new = freqs.old;
+ freqs.old = tmp;
+ for_each_cpu_mask(j, online_policy_cpus) {
+ freqs.cpu = j;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ }
- retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
- return (retval);
+ return 0;
}
static struct freq_attr* centrino_attr[] = {
@@ -686,12 +791,25 @@ static int __init centrino_init(void)
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
+ centrino_cpu_early_init_acpi();
+
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ unsigned int j;
+#endif
+
cpufreq_unregister_driver(&centrino_driver);
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+ for_each_cpu(j) {
+ kfree(acpi_perf_data[j]);
+ acpi_perf_data[j] = NULL;
+ }
+#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
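The centrino_target() rework above still rewrites only the low 16 bits of MSR_IA32_PERF_CTL and preserves the reserved upper bits. A minimal user-space sketch of that masking step (hypothetical register values, plain C instead of the driver's rdmsr/wrmsr):

#include <stdio.h>

/* Combine the current PERF_CTL value with a new 16-bit operating point,
 * preserving the reserved upper bits exactly as centrino_target() does. */
static unsigned int apply_op_point(unsigned int oldmsr, unsigned int msr)
{
    oldmsr &= ~0xffffu;     /* keep reserved bits */
    msr &= 0xffffu;         /* new voltage/frequency pair */
    return oldmsr | msr;
}

int main(void)
{
    unsigned int cur = 0xdead0f13;  /* hypothetical current MSR value */
    unsigned int target = 0x0a1c;   /* hypothetical op_points[].index */

    printf("new MSR value: %#x\n", apply_op_point(cur, target));
    return 0;
}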
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 00f2e058797..fc32c8028e2 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -184,7 +184,7 @@ static void __init geode_configure(void)
#ifdef CONFIG_PCI
-static struct pci_device_id cyrix_55x0[] = {
+static struct pci_device_id __initdata cyrix_55x0[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510) },
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520) },
{ },
@@ -272,14 +272,15 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
isa_dma_bridge_buggy = 2;
-#endif
- c->x86_cache_size=16; /* Yep 16K integrated cache thats it */
-
+
+
/*
* The 5510/5520 companion chips have a funky PIT.
*/
if (pci_dev_present(cyrix_55x0))
pit_latch_buggy = 1;
+#endif
+ c->x86_cache_size=16; /* Yep 16K integrated cache thats it */
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index f94cdb7aca5..a19fcb262db 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -52,7 +52,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* VIA/Cyrix/Centaur-defined */
NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c
index d75524758da..c4351972d9a 100644
--- a/arch/i386/kernel/i387.c
+++ b/arch/i386/kernel/i387.c
@@ -25,7 +25,7 @@
#define HAVE_HWFP 1
#endif
-static unsigned long mxcsr_feature_mask = 0xffffffff;
+static unsigned long mxcsr_feature_mask __read_mostly = 0xffffffff;
void mxcsr_feature_mask_init(void)
{
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 323ef8ab324..b7636b96e10 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -271,8 +271,8 @@ static int i8259A_shutdown(struct sys_device *dev)
* the kernel initialization code can get it
* out of.
*/
- outb(0xff, 0x21); /* mask all of 8259A-1 */
- outb(0xff, 0xA1); /* mask all of 8259A-1 */
+ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
return 0;
}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index d70f2ade5cd..a62df3e764c 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -267,7 +267,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
# include <linux/slab.h> /* kmalloc() */
# include <linux/timer.h> /* time_after() */
-# ifdef CONFIG_BALANCED_IRQ_DEBUG
+#ifdef CONFIG_BALANCED_IRQ_DEBUG
# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
# define Dprintk(x...) do { TDprintk(x); } while (0)
# else
@@ -275,10 +275,15 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
# define Dprintk(x...)
# endif
-
#define IRQBALANCE_CHECK_ARCH -999
-static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
-static int physical_balance = 0;
+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
+#define BALANCED_IRQ_MORE_DELTA (HZ/10)
+#define BALANCED_IRQ_LESS_DELTA (HZ)
+
+static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
+static int physical_balance __read_mostly;
+static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
static struct irq_cpu_info {
unsigned long * last_irq;
@@ -297,12 +302,14 @@ static struct irq_cpu_info {
#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
-#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
-#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
-#define BALANCED_IRQ_MORE_DELTA (HZ/10)
-#define BALANCED_IRQ_LESS_DELTA (HZ)
+static cpumask_t balance_irq_affinity[NR_IRQS] = {
+ [0 ... NR_IRQS-1] = CPU_MASK_ALL
+};
-static long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;
+void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+ balance_irq_affinity[irq] = mask;
+}
static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
unsigned long now, int direction)
@@ -340,7 +347,7 @@ static inline void balance_irq(int cpu, int irq)
if (irqbalance_disabled)
return;
- cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
+ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
new_cpu = move(cpu, allowed_mask, now, 1);
if (cpu != new_cpu) {
set_pending_irq(irq, cpumask_of_cpu(new_cpu));
@@ -529,7 +536,9 @@ tryanotherirq:
}
}
- cpus_and(allowed_mask, cpu_online_map, irq_affinity[selected_irq]);
+ cpus_and(allowed_mask,
+ cpu_online_map,
+ balance_irq_affinity[selected_irq]);
target_cpu_mask = cpumask_of_cpu(min_loaded);
cpus_and(tmp, target_cpu_mask, allowed_mask);
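set_balance_irq_affinity() only narrows the mask the in-kernel balancer may consider; balance_irq() then intersects it with cpu_online_map before moving an interrupt. A rough user-space model of that intersection, with cpumasks reduced to plain bitmaps (all names and values hypothetical):

#include <stdio.h>

#define NR_IRQS 16

static unsigned long cpu_online_map = 0x0f;         /* CPUs 0-3 online */
static unsigned long balance_irq_affinity[NR_IRQS];

static void set_balance_irq_affinity(unsigned int irq, unsigned long mask)
{
    balance_irq_affinity[irq] = mask;
}

int main(void)
{
    unsigned int irq = 5;

    for (int i = 0; i < NR_IRQS; i++)
        balance_irq_affinity[i] = ~0ul;             /* default: all CPUs */

    set_balance_irq_affinity(irq, 0x06);            /* restrict IRQ 5 to CPUs 1-2 */

    /* what the balancer is allowed to use, cf. cpus_and() in balance_irq() */
    unsigned long allowed = cpu_online_map & balance_irq_affinity[irq];
    printf("allowed mask for irq %u: %#lx\n", irq, allowed);
    return 0;
}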
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index f3a9c78c4a2..248e922ee13 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -42,8 +42,8 @@ union irq_ctx {
u32 stack[THREAD_SIZE/sizeof(u32)];
};
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif
/*
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 38806f42784..395a9a6dff8 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -607,7 +607,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
- if (args->regs && user_mode(args->regs))
+ if (args->regs && user_mode_vm(args->regs))
return ret;
switch (val) {
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index e7c138f66c5..0a865889b2a 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -91,7 +91,10 @@ MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
MODULE_LICENSE("GPL");
-#define MICROCODE_VERSION "1.14"
+static int verbose;
+module_param(verbose, int, 0644);
+
+#define MICROCODE_VERSION "1.14a"
#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
@@ -122,14 +125,15 @@ static unsigned int user_buffer_size; /* it's size */
typedef enum mc_error_code {
MC_SUCCESS = 0,
- MC_NOTFOUND = 1,
- MC_MARKED = 2,
- MC_ALLOCATED = 3,
+ MC_IGNORED = 1,
+ MC_NOTFOUND = 2,
+ MC_MARKED = 3,
+ MC_ALLOCATED = 4,
} mc_error_code_t;
static struct ucode_cpu_info {
unsigned int sig;
- unsigned int pf;
+ unsigned int pf, orig_pf;
unsigned int rev;
unsigned int cksum;
mc_error_code_t err;
@@ -164,6 +168,7 @@ static void collect_cpu_info (void *unused)
rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
uci->pf = 1 << ((val[1] >> 18) & 7);
}
+ uci->orig_pf = uci->pf;
}
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
@@ -197,21 +202,34 @@ static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_he
pr_debug(" Checksum 0x%x\n", cksum);
if (mc_header->rev < uci->rev) {
- printk(KERN_ERR "microcode: CPU%d not 'upgrading' to earlier revision"
- " 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
- goto out;
+ if (uci->err == MC_NOTFOUND) {
+ uci->err = MC_IGNORED;
+ uci->cksum = mc_header->rev;
+ } else if (uci->err == MC_IGNORED && uci->cksum < mc_header->rev)
+ uci->cksum = mc_header->rev;
} else if (mc_header->rev == uci->rev) {
- /* notify the caller of success on this cpu */
- uci->err = MC_SUCCESS;
- goto out;
+ if (uci->err < MC_MARKED) {
+ /* notify the caller of success on this cpu */
+ uci->err = MC_SUCCESS;
+ }
+ } else if (uci->err != MC_ALLOCATED || mc_header->rev > uci->mc->hdr.rev) {
+ pr_debug("microcode: CPU%d found a matching microcode update with "
+ " revision 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
+ uci->cksum = cksum;
+ uci->pf = pf; /* keep the original mc pf for cksum calculation */
+ uci->err = MC_MARKED; /* found the match */
+ for_each_online_cpu(cpu_num) {
+ if (ucode_cpu_info + cpu_num != uci
+ && ucode_cpu_info[cpu_num].mc == uci->mc) {
+ uci->mc = NULL;
+ break;
+ }
+ }
+ if (uci->mc != NULL) {
+ vfree(uci->mc);
+ uci->mc = NULL;
+ }
}
-
- pr_debug("microcode: CPU%d found a matching microcode update with "
- " revision 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
- uci->cksum = cksum;
- uci->pf = pf; /* keep the original mc pf for cksum calculation */
- uci->err = MC_MARKED; /* found the match */
-out:
return;
}
@@ -253,10 +271,8 @@ static int find_matching_ucodes (void)
for_each_online_cpu(cpu_num) {
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
- if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/
- continue;
- if (sigmatch(mc_header.sig, uci->sig, mc_header.pf, uci->pf))
+ if (sigmatch(mc_header.sig, uci->sig, mc_header.pf, uci->orig_pf))
mark_microcode_update(cpu_num, &mc_header, mc_header.sig, mc_header.pf, mc_header.cksum);
}
@@ -295,9 +311,8 @@ static int find_matching_ucodes (void)
}
for_each_online_cpu(cpu_num) {
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
- if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/
- continue;
- if (sigmatch(ext_sig.sig, uci->sig, ext_sig.pf, uci->pf)) {
+
+ if (sigmatch(ext_sig.sig, uci->sig, ext_sig.pf, uci->orig_pf)) {
mark_microcode_update(cpu_num, &mc_header, ext_sig.sig, ext_sig.pf, ext_sig.cksum);
}
}
@@ -368,6 +383,13 @@ static void do_update_one (void * unused)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
if (uci->mc == NULL) {
+ if (verbose) {
+ if (uci->err == MC_SUCCESS)
+ printk(KERN_INFO "microcode: CPU%d already at revision 0x%x\n",
+ cpu_num, uci->rev);
+ else
+ printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num);
+ }
return;
}
@@ -426,6 +448,9 @@ out_free:
ucode_cpu_info[j].mc = NULL;
}
}
+ if (ucode_cpu_info[i].err == MC_IGNORED && verbose)
+ printk(KERN_WARNING "microcode: CPU%d not 'upgrading' to earlier revision"
+ " 0x%x (current=0x%x)\n", i, ucode_cpu_info[i].cksum, ucode_cpu_info[i].rev);
}
out:
return error;
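The reworked matching logic above never "upgrades" to an older revision: lower revisions are only remembered as MC_IGNORED (keeping the best such revision for the warning printed later), an equal revision counts as success, and among newer matching revisions the highest one wins. A condensed user-space sketch of that selection rule (data values hypothetical, not the driver's structures):

#include <stdio.h>

struct candidate { unsigned int rev; };

int main(void)
{
    unsigned int current_rev = 0x12;    /* hypothetical CPU revision */
    struct candidate updates[] = { {0x10}, {0x12}, {0x17}, {0x15} };
    unsigned int best = 0, best_ignored = 0;

    for (unsigned int i = 0; i < sizeof(updates)/sizeof(updates[0]); i++) {
        unsigned int rev = updates[i].rev;

        if (rev < current_rev) {        /* never downgrade */
            if (rev > best_ignored)
                best_ignored = rev;
        } else if (rev > current_rev && rev > best) {
            best = rev;                 /* newest applicable update */
        }
    }
    printf("apply 0x%x (ignored up to 0x%x)\n", best, best_ignored);
    return 0;
}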
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index dd6b0e3386c..e6023970aa4 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -48,6 +48,7 @@
#include <linux/crash_dump.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
+#include <linux/suspend.h>
#include <video/edid.h>
@@ -1434,6 +1435,111 @@ static void set_mca_bus(int x)
static void set_mca_bus(int x) { }
#endif
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
+{
+ struct page *page;
+ while (start <= end) {
+ page = pfn_to_page(start);
+ SetPageNosave(page);
+ start++;
+ }
+}
+
+static void __init e820_nosave_reserved_pages(void)
+{
+ int i;
+ unsigned long r_start = 0, r_end = 0;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = PFN_DOWN(ei->addr);
+ end = PFN_UP(ei->addr + ei->size);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RESERVED)
+ continue;
+ r_end = start;
+ /*
+ * Highmem 'Reserved' pages are marked as reserved; swsusp
+ * will not save/restore them, so we ignore these pages here.
+ */
+ if (r_end > max_low_pfn)
+ r_end = max_low_pfn;
+ if (r_end > r_start)
+ mark_nosave_page_range(r_start, r_end-1);
+ if (r_end >= max_low_pfn)
+ break;
+ r_start = end;
+ }
+}
+
+static void __init e820_save_acpi_pages(void)
+{
+ int i;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = ei->addr;
+ end = ei->addr + ei->size;
+ if (start >= end)
+ continue;
+ if (ei->type != E820_ACPI && ei->type != E820_NVS)
+ continue;
+ /*
+ * If the region is below max_low_pfn, it will be
+ * saved/restored by swsusp as ordinary 'RAM' type memory.
+ */
+ if (start < (max_low_pfn << PAGE_SHIFT))
+ start = max_low_pfn << PAGE_SHIFT;
+ /*
+ * Highmem pages (ACPI NVS/Data) are reserved, but swsusp
+ * highmem save/restore will not save/restore them. We mark
+ * them as arch-saveable pages here.
+ */
+ if (end > start)
+ swsusp_add_arch_pages(start, end);
+ }
+}
+
+extern char __start_rodata, __end_rodata;
+/*
+ * BIOS reserved region/hole - no save/restore
+ * ACPI NVS - save/restore
+ * ACPI Data - this is a little tricky: the OS may still use this memory after
+ * it has read the tables from the region, but saving/restoring it has no side
+ * effect and Linux runtime module load/unload might use it.
+ * kernel rodata - no save/restore (kernel rodata isn't changed)
+ */
+static int __init mark_nosave_pages(void)
+{
+ unsigned long pfn_start, pfn_end;
+
+ /* FIXME: provide a version for efi BIOS */
+ if (efi_enabled)
+ return 0;
+ /* BIOS reserved regions & holes */
+ e820_nosave_reserved_pages();
+
+ /* kernel rodata */
+ pfn_start = PFN_UP(virt_to_phys(&__start_rodata));
+ pfn_end = PFN_DOWN(virt_to_phys(&__end_rodata));
+ mark_nosave_page_range(pfn_start, pfn_end-1);
+
+ /* record ACPI Data/NVS as saveable */
+ e820_save_acpi_pages();
+
+ return 0;
+}
+core_initcall(mark_nosave_pages);
+#endif
+
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
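mark_nosave_pages() marks everything the e820 map does not list as usable RAM (reserved regions and holes below max_low_pfn) as nosave, plus the kernel rodata, while ACPI Data/NVS ranges are registered as arch-saveable. The reserved/hole walk boils down to tracking the gap between consecutive usable regions in the sorted map; a compact user-space model (hypothetical region list, max_low_pfn capping omitted):

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };

int main(void)
{
    /* sorted list of usable RAM regions, in page frames (hypothetical) */
    struct region ram[] = { {0x000, 0x09f}, {0x100, 0x3ff}, {0x500, 0x7ff} };
    unsigned long r_start = 0;

    for (unsigned int i = 0; i < sizeof(ram)/sizeof(ram[0]); i++) {
        if (ram[i].start_pfn > r_start)
            printf("nosave pfns %#lx-%#lx\n",
                   r_start, ram[i].start_pfn - 1);
        r_start = ram[i].end_pfn + 1;
    }
    return 0;
}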
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index 52b3ed5d2cb..989c85255db 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -39,7 +39,6 @@
#define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */
#define BMAP_SET(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit))
#define BMAP_TEST(bmap, bit) ((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit)))
-#define MAX_PXM_DOMAINS 256 /* 1 byte and no promises about values */
/* bitmap length; _PXM is at most 255 */
#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8)
static u8 pxm_bitmap[PXM_BITMAP_LEN]; /* bitmap of proximity domains */
@@ -213,19 +212,11 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
node_end_pfn[nid] = memory_chunk->end_pfn;
}
-static u8 pxm_to_nid_map[MAX_PXM_DOMAINS];/* _PXM to logical node ID map */
-
-int pxm_to_node(int pxm)
-{
- return pxm_to_nid_map[pxm];
-}
-
/* Parse the ACPI Static Resource Affinity Table */
static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
{
u8 *start, *end, *p;
int i, j, nid;
- u8 nid_to_pxm_map[MAX_NUMNODES];/* logical node ID to _PXM map */
start = (u8 *)(&(sratp->reserved) + 1); /* skip header */
p = start;
@@ -235,10 +226,6 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
memset(node_memory_chunk, 0, sizeof(node_memory_chunk));
memset(zholes_size, 0, sizeof(zholes_size));
- /* -1 in these maps means not available */
- memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
- memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
-
num_memory_chunks = 0;
while (p < end) {
switch (*p) {
@@ -278,9 +265,7 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
nodes_clear(node_online_map);
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (BMAP_TEST(pxm_bitmap, i)) {
- nid = num_online_nodes();
- pxm_to_nid_map[i] = nid;
- nid_to_pxm_map[nid] = i;
+ int nid = acpi_map_pxm_to_node(i);
node_set_online(nid);
}
}
@@ -288,7 +273,7 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
/* set cnode id in memory chunk structure */
for (i = 0; i < num_memory_chunks; i++)
- node_memory_chunk[i].nid = pxm_to_nid_map[node_memory_chunk[i].pxm];
+ node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);
printk("pxm bitmap: ");
for (i = 0; i < sizeof(pxm_bitmap); i++) {
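With the private pxm_to_nid_map gone, the SRAT parser relies on the generic acpi_map_pxm_to_node()/pxm_to_node() pair, which hands out logical node ids on first use and returns the same id on later lookups. A standalone model of that behaviour (not the ACPI implementation, names hypothetical):

#include <stdio.h>

#define MAX_PXM_DOMAINS 256

static int pxm_to_node_map[MAX_PXM_DOMAINS];
static int next_node;

/* assign a logical node id to a proximity domain on first use */
static int map_pxm_to_node(int pxm)
{
    if (pxm_to_node_map[pxm] == -1)
        pxm_to_node_map[pxm] = next_node++;
    return pxm_to_node_map[pxm];
}

int main(void)
{
    for (int i = 0; i < MAX_PXM_DOMAINS; i++)
        pxm_to_node_map[i] = -1;

    printf("pxm 4 -> node %d\n", map_pxm_to_node(4));
    printf("pxm 9 -> node %d\n", map_pxm_to_node(9));
    printf("pxm 4 -> node %d\n", map_pxm_to_node(4));   /* stable */
    return 0;
}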
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index af56987f69b..dd63d477539 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -316,3 +316,4 @@ ENTRY(sys_call_table)
.long sys_sync_file_range
.long sys_tee /* 315 */
.long sys_vmsplice
+ .long sys_move_pages
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0e498369f35..dcc14477af1 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -149,6 +149,12 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
while (valid_stack_ptr(tinfo, (void *)ebp)) {
addr = *(unsigned long *)(ebp + 4);
printed = print_addr_and_symbol(addr, log_lvl, printed);
+ /*
+ * break out of recursive entries (such as
+ * end_of_stack_stop_unwind_function):
+ */
+ if (ebp == *(unsigned long *)ebp)
+ break;
ebp = *(unsigned long *)ebp;
}
#else
@@ -268,8 +274,9 @@ void show_registers(struct pt_regs *regs)
regs->esi, regs->edi, regs->ebp, esp);
printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
regs->xds & 0xffff, regs->xes & 0xffff, ss);
- printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
- current->comm, current->pid, current_thread_info(), current);
+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
+ TASK_COMM_LEN, current->comm, current->pid,
+ current_thread_info(), current, current->thread_info);
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
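The added check in print_context_stack() stops the frame walk once a saved %ebp points back at its own frame, which is how some terminal frames are linked. The same walk over a synthetic frame chain, as plain user-space C:

#include <stdio.h>

struct frame { struct frame *ebp; unsigned long ret_addr; };

int main(void)
{
    struct frame f2, f1, f0;

    f2.ebp = &f2;   f2.ret_addr = 0xc0100000;   /* self-linked terminator */
    f1.ebp = &f2;   f1.ret_addr = 0xc0101234;
    f0.ebp = &f1;   f0.ret_addr = 0xc0105678;

    for (struct frame *ebp = &f0; ; ebp = ebp->ebp) {
        printf("  [<%08lx>]\n", ebp->ret_addr);
        if (ebp == ebp->ebp)    /* break out of recursive entries */
            break;
    }
    return 0;
}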
diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c
index 4cf981d70f4..6979297ce27 100644
--- a/arch/i386/lib/usercopy.c
+++ b/arch/i386/lib/usercopy.c
@@ -425,15 +425,121 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
: "eax", "edx", "memory");
return size;
}
+
+/*
+ * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
+ * hyoshiok@miraclelinux.com
+ */
+
+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size)
+{
+ int d0, d1;
+
+ __asm__ __volatile__(
+ " .align 2,0x90\n"
+ "0: movl 32(%4), %%eax\n"
+ " cmpl $67, %0\n"
+ " jbe 2f\n"
+ "1: movl 64(%4), %%eax\n"
+ " .align 2,0x90\n"
+ "2: movl 0(%4), %%eax\n"
+ "21: movl 4(%4), %%edx\n"
+ " movnti %%eax, 0(%3)\n"
+ " movnti %%edx, 4(%3)\n"
+ "3: movl 8(%4), %%eax\n"
+ "31: movl 12(%4),%%edx\n"
+ " movnti %%eax, 8(%3)\n"
+ " movnti %%edx, 12(%3)\n"
+ "4: movl 16(%4), %%eax\n"
+ "41: movl 20(%4), %%edx\n"
+ " movnti %%eax, 16(%3)\n"
+ " movnti %%edx, 20(%3)\n"
+ "10: movl 24(%4), %%eax\n"
+ "51: movl 28(%4), %%edx\n"
+ " movnti %%eax, 24(%3)\n"
+ " movnti %%edx, 28(%3)\n"
+ "11: movl 32(%4), %%eax\n"
+ "61: movl 36(%4), %%edx\n"
+ " movnti %%eax, 32(%3)\n"
+ " movnti %%edx, 36(%3)\n"
+ "12: movl 40(%4), %%eax\n"
+ "71: movl 44(%4), %%edx\n"
+ " movnti %%eax, 40(%3)\n"
+ " movnti %%edx, 44(%3)\n"
+ "13: movl 48(%4), %%eax\n"
+ "81: movl 52(%4), %%edx\n"
+ " movnti %%eax, 48(%3)\n"
+ " movnti %%edx, 52(%3)\n"
+ "14: movl 56(%4), %%eax\n"
+ "91: movl 60(%4), %%edx\n"
+ " movnti %%eax, 56(%3)\n"
+ " movnti %%edx, 60(%3)\n"
+ " addl $-64, %0\n"
+ " addl $64, %4\n"
+ " addl $64, %3\n"
+ " cmpl $63, %0\n"
+ " ja 0b\n"
+ " sfence \n"
+ "5: movl %0, %%eax\n"
+ " shrl $2, %0\n"
+ " andl $3, %%eax\n"
+ " cld\n"
+ "6: rep; movsl\n"
+ " movl %%eax,%0\n"
+ "7: rep; movsb\n"
+ "8:\n"
+ ".section .fixup,\"ax\"\n"
+ "9: lea 0(%%eax,%0,4),%0\n"
+ "16: pushl %0\n"
+ " pushl %%eax\n"
+ " xorl %%eax,%%eax\n"
+ " rep; stosb\n"
+ " popl %%eax\n"
+ " popl %0\n"
+ " jmp 8b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,16b\n"
+ " .long 1b,16b\n"
+ " .long 2b,16b\n"
+ " .long 21b,16b\n"
+ " .long 3b,16b\n"
+ " .long 31b,16b\n"
+ " .long 4b,16b\n"
+ " .long 41b,16b\n"
+ " .long 10b,16b\n"
+ " .long 51b,16b\n"
+ " .long 11b,16b\n"
+ " .long 61b,16b\n"
+ " .long 12b,16b\n"
+ " .long 71b,16b\n"
+ " .long 13b,16b\n"
+ " .long 81b,16b\n"
+ " .long 14b,16b\n"
+ " .long 91b,16b\n"
+ " .long 6b,9b\n"
+ " .long 7b,16b\n"
+ ".previous"
+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
+ : "1"(to), "2"(from), "0"(size)
+ : "eax", "edx", "memory");
+ return size;
+}
+
#else
+
/*
* Leave these declared but undefined. There should not be any references to
* them.
*/
-unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
-unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size);
+unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+ unsigned long size);
+unsigned long __copy_user_intel(void __user *to, const void *from,
+ unsigned long size);
+unsigned long __copy_user_zeroing_intel_nocache(void *to,
+ const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */
@@ -515,8 +621,8 @@ do { \
: "memory"); \
} while (0)
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
+unsigned long __copy_to_user_ll(void __user *to, const void *from,
+ unsigned long n)
{
BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
@@ -576,8 +682,8 @@ survive:
}
EXPORT_SYMBOL(__copy_to_user_ll);
-unsigned long
-__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
+unsigned long __copy_from_user_ll(void *to, const void __user *from,
+ unsigned long n)
{
BUG_ON((long)n < 0);
if (movsl_is_ok(to, from, n))
@@ -588,6 +694,21 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned long n)
}
EXPORT_SYMBOL(__copy_from_user_ll);
+unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
+ unsigned long n)
+{
+ BUG_ON((long)n < 0);
+#ifdef CONFIG_X86_INTEL_USERCOPY
+ if ( n > 64 && cpu_has_xmm2)
+ n = __copy_user_zeroing_intel_nocache(to, from, n);
+ else
+ __copy_user_zeroing(to, from, n);
+#else
+ __copy_user_zeroing(to, from, n);
+#endif
+ return n;
+}
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
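__copy_from_user_ll_nocache() only takes the movnti-based path for copies larger than 64 bytes on SSE2-capable CPUs and falls back to the ordinary zeroing copy otherwise. A hedged sketch of just that dispatch, with memcpy() standing in for the real copy routines:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

static int cpu_has_sse2 = 1;    /* assumption: detected elsewhere */

static size_t copy_nocache(void *to, const void *from, size_t n)
{
    /* stand-in for __copy_user_zeroing_intel_nocache() */
    memcpy(to, from, n);
    return 0;                   /* 0 bytes left uncopied */
}

static size_t copy_cached(void *to, const void *from, size_t n)
{
    /* stand-in for __copy_user_zeroing() */
    memcpy(to, from, n);
    return 0;
}

static size_t copy_from_user_nocache(void *to, const void *from, size_t n)
{
    if (n > 64 && cpu_has_sse2)
        return copy_nocache(to, from, n);
    return copy_cached(to, from, n);
}

int main(void)
{
    char src[128] = "payload", dst[128];

    printf("left uncopied: %zu\n", copy_from_user_nocache(dst, src, sizeof(src)));
    return 0;
}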
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 7f0fcf219a2..bd6fe96cc16 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -77,12 +77,15 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
unsigned seg = regs->xcs & 0xffff;
u32 seg_ar, seg_limit, base, *desc;
+ /* Unlikely, but must come before segment checks. */
+ if (unlikely(regs->eflags & VM_MASK)) {
+ base = seg << 4;
+ *eip_limit = base + 0xffff;
+ return base + (eip & 0xffff);
+ }
+
/* The standard kernel/user address space limit. */
*eip_limit = (seg & 3) ? USER_DS.seg : KERNEL_DS.seg;
-
- /* Unlikely, but must come before segment checks. */
- if (unlikely((regs->eflags & VM_MASK) != 0))
- return eip + (seg << 4);
/* By far the most common cases. */
if (likely(seg == __USER_CS || seg == __KERNEL_CS))
@@ -380,12 +383,12 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area;
if (error_code & 4) {
/*
- * accessing the stack below %esp is always a bug.
- * The "+ 32" is there due to some instructions (like
- * pusha) doing post-decrement on the stack and that
- * doesn't show up until later..
+ * Accessing the stack below %esp is always a bug.
+ * The large cushion allows instructions like enter
+ * and pusha to work. ("enter $65535,$31" pushes
+ * 32 pointers and then decrements %esp by 65535.)
*/
- if (address + 32 < regs->esp)
+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
goto bad_area;
}
if (expand_stack(vma, address))
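The enlarged cushion in do_page_fault() allows for "enter $65535,$31", which pushes 32 pointers and then drops %esp by 65535, so a fault that far below the stack pointer may still be legitimate. The check itself is simple arithmetic:

#include <stdio.h>

/* returns 1 if a fault at 'address' below the stack pointer is clearly bogus */
static int below_stack_cushion(unsigned long address, unsigned long esp)
{
    return address + 65536 + 32 * sizeof(unsigned long) < esp;
}

int main(void)
{
    unsigned long esp = 0xbfff0000;

    printf("%d\n", below_stack_cushion(esp - 200, esp));       /* 0: allowed */
    printf("%d\n", below_stack_cushion(esp - 0x20000, esp));   /* 1: bad_area */
    return 0;
}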
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 3df1371d452..bf19513f0ce 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -29,6 +29,7 @@
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
+#include <linux/cpumask.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -384,7 +385,7 @@ static void __init pagetable_init (void)
#endif
}
-#ifdef CONFIG_SOFTWARE_SUSPEND
+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
/*
* Swap suspend & friends need this for resume because things like the intel-agp
* driver might have split up a kernel 4MB mapping.
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 92c3d9f0e73..0887b34bc59 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -209,19 +209,19 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
}
void global_flush_tlb(void)
-{
- LIST_HEAD(l);
+{
+ struct list_head l;
struct page *pg, *next;
BUG_ON(irqs_disabled());
spin_lock_irq(&cpa_lock);
- list_splice_init(&df_list, &l);
+ list_replace_init(&df_list, &l);
spin_unlock_irq(&cpa_lock);
flush_map();
list_for_each_entry_safe(pg, next, &l, lru)
__free_page(pg);
-}
+}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 06dab00aaad..8ce69508f3c 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -198,14 +198,14 @@ static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigne
*/
static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
+ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
return irqmap[read_config_nybble(router, 0x48, pirq-1)];
}
static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
+ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
unsigned int val = irqmap[irq];
if (val) {
@@ -256,13 +256,13 @@ static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
*/
static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
}
static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static unsigned int pirqmap[4] = { 3, 2, 5, 1 };
+ static const unsigned int pirqmap[4] = { 3, 2, 5, 1 };
write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
return 1;
}
@@ -274,13 +274,13 @@ static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq
*/
static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
- static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
return read_config_nybble(router,0x43, pirqmap[pirq-1]);
}
static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
- static unsigned char pirqmap[4] = { 1, 0, 2, 3 };
+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
return 1;
}
@@ -505,7 +505,7 @@ static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
{
- static struct pci_device_id pirq_440gx[] = {
+ static struct pci_device_id __initdata pirq_440gx[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
{ },
@@ -880,6 +880,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
DBG(" -> got IRQ %d\n", irq);
msg = "Found";
+ eisa_set_level_irq(irq);
} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
DBG(" -> assigning IRQ %d", newirq);
if (r->set(pirq_router_dev, dev, pirq, newirq)) {
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 79b2370c7fa..e6517915fe3 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -10,6 +10,8 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/suspend.h>
+#include <asm/mtrr.h>
+#include <asm/mce.h>
static struct saved_context saved_context;
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 0f3076a820c..fbb25b00629 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -77,6 +77,7 @@ choice
config IA64_GENERIC
bool "generic"
select ACPI
+ select PCI
select NUMA
select ACPI_NUMA
help
@@ -273,7 +274,6 @@ config HOTPLUG_CPU
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
- default off
help
Improves the CPU scheduler's decision making when dealing with
Intel IA64 chips with MultiThreading at a cost of slightly increased
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bdccd0b1eb6..5825ddee58d 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1958,7 +1958,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
if (pxm < 0)
return;
- node = pxm_to_nid_map[pxm];
+ node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node))
return;
@@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- ACPI_MEM_FREE(dev_info);
+ kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 58c93a30348..6ea642beaae 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -68,8 +68,6 @@ EXPORT_SYMBOL(pm_power_off);
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
-static unsigned int __initdata acpi_madt_rev;
-
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
@@ -243,6 +241,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
+static unsigned int __initdata acpi_madt_rev;
+
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)
@@ -415,9 +415,6 @@ static int __initdata srat_num_cpus; /* number of cpus */
static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
-/* maps to convert between proximity domain and logical node ID */
-int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
-int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;
static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
@@ -533,22 +530,17 @@ void __init acpi_numa_arch_fixup(void)
* MCD - This can probably be dropped now. No need for pxm ID to node ID
* mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
*/
- /* calculate total number of nodes in system from PXM bitmap */
- memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
- memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
nodes_clear(node_online_map);
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
- int nid = num_online_nodes();
- pxm_to_nid_map[i] = nid;
- nid_to_pxm_map[nid] = i;
+ int nid = acpi_map_pxm_to_node(i);
node_set_online(nid);
}
}
/* set logical node id in memory chunk structure */
for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
+ node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
/* assign memory bank numbers for each chunk on each node */
for_each_online_node(i) {
@@ -562,7 +554,7 @@ void __init acpi_numa_arch_fixup(void)
/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
- node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
+ node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
printk(KERN_INFO "Number of logical nodes in system = %d\n",
num_online_nodes());
@@ -575,11 +567,11 @@ void __init acpi_numa_arch_fixup(void)
for (i = 0; i < slit_table->localities; i++) {
if (!pxm_bit_test(i))
continue;
- node_from = pxm_to_nid_map[i];
+ node_from = pxm_to_node(i);
for (j = 0; j < slit_table->localities; j++) {
if (!pxm_bit_test(j))
continue;
- node_to = pxm_to_nid_map[j];
+ node_to = pxm_to_node(j);
node_distance(node_from, node_to) =
slit_table->entry[i * slit_table->localities + j];
}
@@ -785,9 +777,9 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
/*
* Assuming that the container driver would have set the proximity
- * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
+ * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag
*/
- node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
+ node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_node(pxm_id);
node_cpuid[cpu].phys_id = physid;
#endif
@@ -966,7 +958,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
if (pxm < 0)
return AE_OK;
- node = pxm_to_nid_map[pxm];
+ node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index bcb80ca5cf4..32c999f58d1 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1584,7 +1584,7 @@ sys_call_table:
data8 sys_keyctl
data8 sys_ioprio_set
data8 sys_ioprio_get // 1275
- data8 sys_ni_syscall
+ data8 sys_move_pages
data8 sys_inotify_init
data8 sys_inotify_add_watch
data8 sys_inotify_rm_watch
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 077f21216b6..6d7bc8ff7b3 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -532,7 +532,6 @@ static ctl_table pfm_sysctl_root[] = {
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-static int pfm_flush(struct file *filp);
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -595,10 +594,11 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
}
-static struct super_block *
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
+static int
+pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+ return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}
static struct file_system_type pfm_fs_type = {
@@ -1773,7 +1773,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
* When caller is self-monitoring, the context is unloaded.
*/
static int
-pfm_flush(struct file *filp)
+pfm_flush(struct file *filp, fl_owner_t id)
{
pfm_context_t *ctx;
struct task_struct *task;
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index fcd2bad0286..5f03b9e524d 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -29,15 +29,8 @@
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>
-#define DEBUG 0
-#if DEBUG
-#define dprintk printk
-#else
-#define dprintk(x...) do { } while (0)
-#endif
-
-void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);
+extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
#define MAX_UNCACHED_GRANULES 5
static int allocated_granules;
@@ -60,6 +53,7 @@ static void uncached_ipi_visibility(void *data)
static void uncached_ipi_mc_drain(void *data)
{
int status;
+
status = ia64_pal_mc_drain();
if (status)
printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
@@ -67,30 +61,35 @@ static void uncached_ipi_mc_drain(void *data)
}
-static unsigned long
-uncached_get_new_chunk(struct gen_pool *poolp)
+/*
+ * Add a new chunk of uncached memory pages to the specified pool.
+ *
+ * @pool: pool to add new chunk of uncached memory to
+ * @nid: node id of node to allocate memory from, or -1
+ *
+ * This is accomplished by first allocating a granule of cached memory pages
+ * and then converting them to uncached memory pages.
+ */
+static int uncached_add_chunk(struct gen_pool *pool, int nid)
{
struct page *page;
- void *tmp;
int status, i;
- unsigned long addr, node;
+ unsigned long c_addr, uc_addr;
if (allocated_granules >= MAX_UNCACHED_GRANULES)
- return 0;
+ return -1;
+
+ /* attempt to allocate a granule's worth of cached memory pages */
- node = poolp->private;
- page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
+ page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ if (!page)
+ return -1;
- dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
- page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);
+ /* convert the memory pages from cached to uncached */
- /*
- * Do magic if no mem on local node! XXX
- */
- if (!page)
- return 0;
- tmp = page_address(page);
+ c_addr = (unsigned long)page_address(page);
+ uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
/*
* There's a small race here where it's possible for someone to
@@ -100,76 +99,90 @@ uncached_get_new_chunk(struct gen_pool *poolp)
for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
SetPageUncached(&page[i]);
- flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);
+ flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-
- dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
- status, raw_smp_processor_id());
-
if (!status) {
status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
if (status)
- printk(KERN_WARNING "smp_call_function failed for "
- "uncached_ipi_visibility! (%i)\n", status);
+ goto failed;
}
+ preempt_disable();
+
if (ia64_platform_is("sn2"))
- sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
+ sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
else
- flush_icache_range((unsigned long)tmp,
- (unsigned long)tmp+IA64_GRANULE_SIZE);
+ flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
+
+ /* flush the just introduced uncached translation from the TLB */
+ local_flush_tlb_all();
+
+ preempt_enable();
ia64_pal_mc_drain();
status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
if (status)
- printk(KERN_WARNING "smp_call_function failed for "
- "uncached_ipi_mc_drain! (%i)\n", status);
+ goto failed;
- addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
+ /*
+ * The chunk of memory pages has been converted to uncached so now we
+ * can add it to the pool.
+ */
+ status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+ if (status)
+ goto failed;
allocated_granules++;
- return addr;
+ return 0;
+
+ /* failed to convert or add the chunk so give it back to the kernel */
+failed:
+ for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+ ClearPageUncached(&page[i]);
+
+ free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ return -1;
}
/*
* uncached_alloc_page
*
+ * @starting_nid: node id of node to start with, or -1
+ *
* Allocate 1 uncached page. Allocates on the requested node. If no
* uncached pages are available on the requested node, roundrobin starting
- * with higher nodes.
+ * with the next higher node.
*/
-unsigned long
-uncached_alloc_page(int nid)
+unsigned long uncached_alloc_page(int starting_nid)
{
- unsigned long maddr;
+ unsigned long uc_addr;
+ struct gen_pool *pool;
+ int nid;
- maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);
+ if (unlikely(starting_nid >= MAX_NUMNODES))
+ return 0;
- dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
- maddr, nid);
+ if (starting_nid < 0)
+ starting_nid = numa_node_id();
+ nid = starting_nid;
- /*
- * If no memory is availble on our local node, try the
- * remaining nodes in the system.
- */
- if (!maddr) {
- int i;
-
- for (i = MAX_NUMNODES - 1; i >= 0; i--) {
- if (i == nid || !node_online(i))
- continue;
- maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
- dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
- "returns %lx on node %i\n", maddr, i);
- if (maddr) {
- break;
- }
- }
- }
+ do {
+ if (!node_online(nid))
+ continue;
+ pool = uncached_pool[nid];
+ if (pool == NULL)
+ continue;
+ do {
+ uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+ if (uc_addr != 0)
+ return uc_addr;
+ } while (uncached_add_chunk(pool, nid) == 0);
+
+ } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
- return maddr;
+ return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
@@ -177,21 +190,22 @@ EXPORT_SYMBOL(uncached_alloc_page);
/*
* uncached_free_page
*
+ * @uc_addr: uncached address of page to free
+ *
* Free a single uncached page.
*/
-void
-uncached_free_page(unsigned long maddr)
+void uncached_free_page(unsigned long uc_addr)
{
- int node;
-
- node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);
+ int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
+ struct gen_pool *pool = uncached_pool[nid];
- dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
+ if (unlikely(pool == NULL))
+ return;
- if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
- panic("uncached_free_page invalid address %lx\n", maddr);
+ if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
+ panic("uncached_free_page invalid address %lx\n", uc_addr);
- gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
+ gen_pool_free(pool, uc_addr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
@@ -199,43 +213,39 @@ EXPORT_SYMBOL(uncached_free_page);
/*
* uncached_build_memmap,
*
+ * @uc_start: uncached starting address of a chunk of uncached memory
+ * @uc_end: uncached ending address of a chunk of uncached memory
+ * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
+ *
* Called at boot time to build a map of pages that can be used for
* memory special operations.
*/
-static int __init
-uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
+static int __init uncached_build_memmap(unsigned long uc_start,
+ unsigned long uc_end, void *arg)
{
- long length = end - start;
- int node;
-
- dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+ int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
+ struct gen_pool *pool = uncached_pool[nid];
+ size_t size = uc_end - uc_start;
touch_softlockup_watchdog();
- memset((char *)start, 0, length);
- node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
-
- for (; start < end ; start += PAGE_SIZE) {
- dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
- gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
+ if (pool != NULL) {
+ memset((char *)uc_start, 0, size);
+ (void) gen_pool_add(pool, uc_start, size, nid);
}
-
return 0;
}
-static int __init uncached_init(void) {
- int i;
+static int __init uncached_init(void)
+{
+ int nid;
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (!node_online(i))
- continue;
- uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
- &uncached_get_new_chunk, i);
+ for_each_online_node(nid) {
+ uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
}
- efi_memmap_walk_uc(uncached_build_memmap);
-
+ efi_memmap_walk_uc(uncached_build_memmap, NULL);
return 0;
}
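uncached_alloc_page() now starts at the requested (or local) node and walks the online nodes round-robin, trying to grow a node's pool with uncached_add_chunk() before giving up on it. A small user-space model of that search order (pool contents and node map hypothetical; the chunk-growing step is only noted in a comment):

#include <stdio.h>

#define MAX_NODES 8

/* hypothetical: pages currently available in each node's pool */
static int pool_pages[MAX_NODES]  = { 0, 0, 0, 3, 0, 1, 0, 0 };
static int node_online[MAX_NODES] = { 1, 1, 0, 1, 0, 1, 1, 0 };

static int alloc_page_from(int starting_nid)
{
    int nid = starting_nid;

    do {
        if (!node_online[nid])
            continue;
        if (pool_pages[nid] > 0) {
            pool_pages[nid]--;
            return nid;         /* got a page from this node */
        }
        /* the real code would try uncached_add_chunk() here */
    } while ((nid = (nid + 1) % MAX_NODES) != starting_nid);

    return -1;                  /* nothing available anywhere */
}

int main(void)
{
    printf("allocated from node %d\n", alloc_page_from(4));
    return 0;
}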
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cafa8776a53..11f08001f8c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -671,9 +671,11 @@ int add_memory(u64 start, u64 size)
return ret;
}
+EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(remove_memory);
#endif
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index ab829a22f8a..cf7751b99d1 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -352,7 +352,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
if (pxm >= 0)
- controller->node = pxm_to_nid_map[pxm];
+ controller->node = pxm_to_node(pxm);
#endif
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 30988dfbddf..93577abae36 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -139,7 +139,7 @@ static int __init pxm_to_nasid(int pxm)
int i;
int nid;
- nid = pxm_to_nid_map[pxm];
+ nid = pxm_to_node(pxm);
for (i = 0; i < num_node_memblks; i++) {
if (node_memblk[i].nid == nid) {
return NASID_GET(node_memblk[i].start_paddr);
@@ -704,7 +704,7 @@ void __init build_cnode_tables(void)
* cnode == node for all C & M bricks.
*/
for_each_online_node(node) {
- nasid = pxm_to_nasid(nid_to_pxm_map[node]);
+ nasid = pxm_to_nasid(node_to_pxm(node));
sn_cnodeid_to_nasid[node] = nasid;
physical_node_map[nasid] = node;
}
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0..2862cb33026 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*
*/
#include <linux/module.h>
#include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
/**
* sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
* Flush a range of addresses from all caches including L4.
* All addresses fully or partially contained within
* @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
*/
void
sn_flush_all_caches(long flush_addr, long bytes)
{
- flush_icache_range(flush_addr, flush_addr+bytes);
+ unsigned long addr = flush_addr;
+
+ /* SHub1 requires a cached address */
+ if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+ addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+ flush_icache_range(addr, addr + bytes);
/*
* The last call may have returned before the caches
* were actually flushed, so we call it again to make
* sure.
*/
- flush_icache_range(flush_addr, flush_addr+bytes);
+ flush_icache_range(addr, addr + bytes);
mb();
}
EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 320fde05dc6..522079f8c2b 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -226,7 +226,7 @@ ENTRY(nmi_handler)
inthandler:
SAVE_ALL_INT
GET_CURRENT(%d0)
- addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+ addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_VECTOR){#4,#10},%d0
@@ -245,7 +245,7 @@ inthandler:
3: addql #8,%sp | pop parameters off stack
ret_from_interrupt:
- subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+ subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq 1f
2:
RESTORE_ALL
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
index 514d323ad53..4b85514792e 100644
--- a/arch/m68k/kernel/ints.c
+++ b/arch/m68k/kernel/ints.c
@@ -95,6 +95,12 @@ void __init init_IRQ(void)
{
int i;
+ /* assembly irq entry code relies on this... */
+ if (HARDIRQ_MASK != 0x00ff0000) {
+ extern void hardirq_mask_is_broken(void);
+ hardirq_mask_is_broken();
+ }
+
for (i = 0; i < SYS_IRQS; i++) {
if (mach_default_handler)
irq_list[i].handler = (*mach_default_handler)[i];
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 866917bfa02..f9af893cd28 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -763,7 +763,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(usp))
+ if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((usp - frame_size) & -8UL);
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index cdf58fbb3e7..837a8870990 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -114,7 +114,7 @@ void __init base_trap_init(void)
if(MACH_IS_SUN3X) {
extern e_vector *sun3x_prom_vbr;
- __asm__ volatile ("movec %%vbr, %0" : "=r" ((void*)sun3x_prom_vbr));
+ __asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
}
/* setup the exception vector table */
@@ -992,6 +992,7 @@ void show_registers(struct pt_regs *regs)
void show_stack(struct task_struct *task, unsigned long *stack)
{
+ unsigned long *p;
unsigned long *endstack;
int i;
@@ -1004,12 +1005,13 @@ void show_stack(struct task_struct *task, unsigned long *stack)
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
printk("Stack from %08lx:", (unsigned long)stack);
+ p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
- if (stack + 1 > endstack)
+ if (p + 1 > endstack)
break;
if (i % 8 == 0)
printk("\n ");
- printk(" %08lx", *stack++);
+ printk(" %08lx", *p++);
}
printk("\n");
show_trace(stack);
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index ebe51a51381..6bbf19f9600 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -4,5 +4,5 @@
EXTRA_AFLAGS := -traditional
-lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
- checksum.o string.o semaphore.o
+lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
+ checksum.o string.o semaphore.o uaccess.o
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
new file mode 100644
index 00000000000..1bc188c0d98
--- /dev/null
+++ b/arch/m68k/lib/uaccess.c
@@ -0,0 +1,222 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+unsigned long __generic_copy_from_user(void *to, const void __user *from,
+ unsigned long n)
+{
+ unsigned long tmp, res;
+
+ asm volatile ("\n"
+ " tst.l %0\n"
+ " jeq 2f\n"
+ "1: moves.l (%1)+,%3\n"
+ " move.l %3,(%2)+\n"
+ " subq.l #1,%0\n"
+ " jne 1b\n"
+ "2: btst #1,%5\n"
+ " jeq 4f\n"
+ "3: moves.w (%1)+,%3\n"
+ " move.w %3,(%2)+\n"
+ "4: btst #0,%5\n"
+ " jeq 6f\n"
+ "5: moves.b (%1)+,%3\n"
+ " move.b %3,(%2)+\n"
+ "6:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "10: move.l %0,%3\n"
+ "7: clr.l (%2)+\n"
+ " subq.l #1,%3\n"
+ " jne 7b\n"
+ " lsl.l #2,%0\n"
+ " btst #1,%5\n"
+ " jeq 8f\n"
+ "30: clr.w (%2)+\n"
+ " addq.l #2,%0\n"
+ "8: btst #0,%5\n"
+ " jeq 6b\n"
+ "50: clr.b (%2)+\n"
+ " addq.l #1,%0\n"
+ " jra 6b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,10b\n"
+ " .long 3b,30b\n"
+ " .long 5b,50b\n"
+ " .previous"
+ : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp)
+ : "0" (n / 4), "d" (n & 3));
+
+ return res;
+}
+EXPORT_SYMBOL(__generic_copy_from_user);
+
+unsigned long __generic_copy_to_user(void __user *to, const void *from,
+ unsigned long n)
+{
+ unsigned long tmp, res;
+
+ asm volatile ("\n"
+ " tst.l %0\n"
+ " jeq 4f\n"
+ "1: move.l (%1)+,%3\n"
+ "2: moves.l %3,(%2)+\n"
+ "3: subq.l #1,%0\n"
+ " jne 1b\n"
+ "4: btst #1,%5\n"
+ " jeq 6f\n"
+ " move.w (%1)+,%3\n"
+ "5: moves.w %3,(%2)+\n"
+ "6: btst #0,%5\n"
+ " jeq 8f\n"
+ " move.b (%1)+,%3\n"
+ "7: moves.b %3,(%2)+\n"
+ "8:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "20: lsl.l #2,%0\n"
+ "50: add.l %5,%0\n"
+ " jra 7b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,20b\n"
+ " .long 3b,20b\n"
+ " .long 5b,50b\n"
+ " .long 6b,50b\n"
+ " .long 7b,50b\n"
+ " .long 8b,50b\n"
+ " .previous"
+ : "=d" (res), "+a" (from), "+a" (to), "=&r" (tmp)
+ : "0" (n / 4), "d" (n & 3));
+
+ return res;
+}
+EXPORT_SYMBOL(__generic_copy_to_user);
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+long strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res;
+ char c;
+
+ if (count <= 0)
+ return count;
+
+ asm volatile ("\n"
+ "1: moves.b (%2)+,%4\n"
+ " move.b %4,(%1)+\n"
+ " jeq 2f\n"
+ " subq.l #1,%3\n"
+ " jne 1b\n"
+ "2: sub.l %3,%0\n"
+ "3:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "10: move.l %5,%0\n"
+ " jra 3b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,10b\n"
+ " .previous"
+ : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
+ : "i" (-EFAULT), "0" (count));
+
+ return res;
+}
+EXPORT_SYMBOL(strncpy_from_user);
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+long strnlen_user(const char __user *src, long n)
+{
+ char c;
+ long res;
+
+ asm volatile ("\n"
+ "1: subq.l #1,%1\n"
+ " jmi 3f\n"
+ "2: moves.b (%0)+,%2\n"
+ " tst.b %2\n"
+ " jne 1b\n"
+ " jra 4f\n"
+ "\n"
+ "3: addq.l #1,%0\n"
+ "4: sub.l %4,%0\n"
+ "5:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "20: sub.l %0,%0\n"
+ " jra 5b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,20b\n"
+ " .previous\n"
+ : "=&a" (res), "+d" (n), "=&d" (c)
+ : "0" (src), "r" (src));
+
+ return res;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/*
+ * Zero Userspace
+ */
+
+unsigned long clear_user(void __user *to, unsigned long n)
+{
+ unsigned long res;
+
+ asm volatile ("\n"
+ " tst.l %0\n"
+ " jeq 3f\n"
+ "1: moves.l %2,(%1)+\n"
+ "2: subq.l #1,%0\n"
+ " jne 1b\n"
+ "3: btst #1,%4\n"
+ " jeq 5f\n"
+ "4: moves.w %2,(%1)+\n"
+ "5: btst #0,%4\n"
+ " jeq 7f\n"
+ "6: moves.b %2,(%1)\n"
+ "7:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "10: lsl.l #2,%0\n"
+ "40: add.l %4,%0\n"
+ " jra 7b\n"
+ " .previous\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,10b\n"
+ " .long 2b,10b\n"
+ " .long 4b,40b\n"
+ " .long 5b,40b\n"
+ " .long 6b,40b\n"
+ " .long 7b,40b\n"
+ " .previous"
+ : "=d" (res), "+a" (to)
+ : "r" (0), "0" (n / 4), "d" (n & 3));
+
+ return res;
+}
+EXPORT_SYMBOL(clear_user);
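The new m68k strnlen_user() follows the usual convention: it returns the string length including the terminating NUL, a value greater than n when no terminator is found within the limit, and 0 on a fault. A plain-C reference for those return values, without the exception-table handling (name hypothetical):

#include <stdio.h>

static long strnlen_bounded(const char *src, long n)
{
    long i;

    for (i = 0; i < n; i++)
        if (src[i] == '\0')
            return i + 1;       /* length including the NUL */
    return n + 1;               /* no terminator within n bytes */
}

int main(void)
{
    printf("%ld\n", strnlen_bounded("abc", 16));    /* 4 */
    printf("%ld\n", strnlen_bounded("abcdef", 3));  /* 4 -> "too long" */
    return 0;
}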
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 14f8d3f4e19..19dce75711b 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -89,24 +89,11 @@ extern void mac_debugging_long(int, long);
static void mac_get_model(char *str);
-void mac_bang(int irq, void *vector, struct pt_regs *p)
-{
- printk(KERN_INFO "Resetting ...\n");
- mac_reset();
-}
-
static void mac_sched_init(irqreturn_t (*vector)(int, void *, struct pt_regs *))
{
via_init_clock(vector);
}
-#if 0
-void mac_waitbut (void)
-{
- ;
-}
-#endif
-
extern irqreturn_t mac_default_handler(int, void *, struct pt_regs *);
irqreturn_t (*mac_handlers[8])(int, void *, struct pt_regs *)=
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 1809601ad90..7a1600bd195 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -216,7 +216,6 @@ static void scc_irq_disable(int);
* console_loglevel determines NMI handler function
*/
-extern irqreturn_t mac_bang(int, void *, struct pt_regs *);
irqreturn_t mac_nmi_handler(int, void *, struct pt_regs *);
irqreturn_t mac_debug_handler(int, void *, struct pt_regs *);
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index cd528bf7b43..a6e3814c866 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -25,7 +25,6 @@
#include <linux/init.h>
#include <linux/ide.h>
-#include <asm/traps.h>
#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
@@ -71,7 +70,6 @@ void via_irq_enable(int irq);
void via_irq_disable(int irq);
void via_irq_clear(int irq);
-extern irqreturn_t mac_bang(int, void *, struct pt_regs *);
extern irqreturn_t mac_scc_dispatch(int, void *, struct pt_regs *);
extern int oss_present;
@@ -212,11 +210,6 @@ void __init via_init(void)
break;
}
#else
- /* The alernate IRQ mapping seems to just not work. Anyone with a */
- /* supported machine is welcome to take a stab at fixing it. It */
- /* _should_ work on the following Quadras: 610,650,700,800,900,950 */
- /* - 1999-06-12 (jmt) */
-
via_alt_mapping = 0;
#endif
@@ -270,12 +263,6 @@ void __init via_register_interrupts(void)
cpu_request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
(void *) via1);
-#if 0 /* interferes with serial on some machines */
- if (!psc_present) {
- cpu_request_irq(IRQ_AUTO_6, mac_bang, IRQ_FLG_LOCK,
- "Off Switch", mac_bang);
- }
-#endif
}
cpu_request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
"via2", (void *) via2);
@@ -471,8 +458,8 @@ irqreturn_t via2_irq(int irq, void *dev_id, struct pt_regs *regs)
for (i = 0, irq_bit = 1 ; i < 7 ; i++, irq_bit <<= 1)
if (events & irq_bit) {
via2[gIER] = irq_bit;
- mac_do_irq_list(VIA2_SOURCE_BASE + i, regs);
via2[gIFR] = irq_bit | rbv_clear;
+ mac_do_irq_list(VIA2_SOURCE_BASE + i, regs);
via2[gIER] = irq_bit | 0x80;
}
return IRQ_HANDLED;
@@ -529,6 +516,7 @@ void via_irq_enable(int irq) {
}
via2[gIER] = irq_bit | 0x80;
} else if (irq_src == 7) {
+ nubus_active |= irq_bit;
if (rbv_present) {
/* enable the slot interrupt. SIER works like IER. */
via2[rSIER] = IER_SET_BIT(irq_idx);
@@ -550,7 +538,6 @@ void via_irq_enable(int irq) {
}
}
}
- nubus_active |= irq_bit;
}
}
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index afb57eeafdc..bdb11103694 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -203,7 +203,7 @@ void __init paging_init(void)
{
int chunk;
unsigned long mem_avail = 0;
- unsigned long zones_size[3] = { 0, };
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifdef DEBUG
{
@@ -257,12 +257,12 @@ void __init paging_init(void)
#ifdef DEBUG
printk ("before free_area_init\n");
#endif
- zones_size[0] = (mach_max_dma_address < (unsigned long)high_memory ?
- (mach_max_dma_address+1) : (unsigned long)high_memory);
- zones_size[1] = (unsigned long)high_memory - zones_size[0];
+ zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
+ (mach_max_dma_address+1) : (unsigned long)high_memory);
+ zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[ZONE_DMA];
- zones_size[0] = (zones_size[0] - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[1] >>= PAGE_SHIFT;
+ zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
+ zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;
free_area_init(zones_size);
}
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index a47be196a47..ac6640ade0b 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -46,7 +46,7 @@ void __init paging_init(void)
unsigned long address;
unsigned long next_pgtable;
unsigned long bootmem_end;
- unsigned long zones_size[3] = {0, 0, 0};
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
unsigned long size;
@@ -92,8 +92,7 @@ void __init paging_init(void)
current->mm = NULL;
/* memory sizing is a hack stolen from motorola.c.. hope it works for us */
- zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[1] = 0;
+ zones_size[ZONE_DMA] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
free_area_init(zones_size);
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index e1b3aa39e27..8e2c5a88efa 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -553,7 +553,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(usp))
+ if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void *)((usp - frame_size) & -8UL);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a0ac0e5f61a..2d2fdf77e30 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -497,7 +497,7 @@ einval: li v0, -EINVAL
sys sys_sched_get_priority_min 1
sys sys_sched_rr_get_interval 2 /* 4165 */
sys sys_nanosleep, 2
- sys sys_mremap, 4
+ sys sys_mremap, 5
sys sys_accept 3
sys sys_bind 3
sys sys_connect 3 /* 4170 */
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 5407b784cd0..19e1ef43eb4 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -694,7 +694,7 @@ asmlinkage int irix_statfs(const char __user *path,
if (error)
goto out;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(nd.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -732,7 +732,7 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf)
goto out;
}
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(file->f_dentry, &kbuf);
if (error)
goto out_f;
@@ -1360,7 +1360,7 @@ asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf)
error = user_path_walk(fname, &nd);
if (error)
goto out;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(nd.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -1406,7 +1406,7 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf)
error = -EBADF;
goto out;
}
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(file->f_dentry, &kbuf);
if (error)
goto out_f;
@@ -1611,7 +1611,7 @@ asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *
error = user_path_walk(fname, &nd);
if (error)
goto out;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(nd.dentry, &kbuf);
if (error)
goto dput_and_out;
@@ -1658,7 +1658,7 @@ asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf)
error = -EBADF;
goto out;
}
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &kbuf);
+ error = vfs_statfs(file->f_dentry, &kbuf);
if (error)
goto out_f;
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index 05273ccced0..cb69727027a 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -145,7 +145,7 @@ static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf)
s = user_get_super(dev);
if (s == NULL)
goto out;
- err = vfs_statfs(s, &sbuf);
+ err = vfs_statfs(s->s_root, &sbuf);
drop_super(s);
if (err)
goto out;
@@ -186,12 +186,12 @@ struct hpux_statfs {
int16_t f_pad;
};
-static int vfs_statfs_hpux(struct super_block *sb, struct hpux_statfs *buf)
+static int vfs_statfs_hpux(struct dentry *dentry, struct hpux_statfs *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(sb, &st);
+ retval = vfs_statfs(dentry, &st);
if (retval)
return retval;
@@ -219,7 +219,7 @@ asmlinkage long hpux_statfs(const char __user *path,
error = user_path_walk(path, &nd);
if (!error) {
struct hpux_statfs tmp;
- error = vfs_statfs_hpux(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_hpux(nd.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_release(&nd);
@@ -237,7 +237,7 @@ asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs_hpux(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_hpux(file->f_dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 05767e83cf2..cc38edfd90c 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -248,7 +248,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
(unsigned long)ka, sp, frame_size);
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp; /* Stacks grow up! */
DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7b829c754d0..e922a88b2ba 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -718,7 +718,6 @@ config PPC_64K_PAGES
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on PPC64 && SMP
- default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with POWER5 cpus at a cost of slightly increased
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bfcec4cc70a..40d4c14fde8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -380,8 +380,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_IRQSTACKS
-struct thread_info *softirq_ctx[NR_CPUS];
-struct thread_info *hardirq_ctx[NR_CPUS];
+struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
+struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
void irq_ctx_init(void)
{
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1987697b23a..7b4572805db 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -436,11 +436,11 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return spufs_create_root(sb, data);
}
-static struct super_block *
+static int
spufs_get_sb(struct file_system_type *fstype, int flags,
- const char *name, void *data)
+ const char *name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fstype, flags, data, spufs_fill_super);
+ return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
}
static struct file_system_type spufs_type = {
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 01c5c082f97..821a141889d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -446,6 +446,14 @@ config NO_IDLE_HZ_INIT
The HZ timer is switched off in idle by default. That means the
HZ timer is already disabled at boot time.
+config S390_HYPFS_FS
+ bool "s390 hypervisor file system support"
+ select SYS_HYPERVISOR
+ default y
+ help
+ This is a virtual file system intended to provide accounting
+ information in an s390 hypervisor environment.
+
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7bb16fb97d4..b3791fb094a 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -76,7 +76,7 @@ LDFLAGS_vmlinux := -e start
head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
core-y += arch/$(ARCH)/mm/ arch/$(ARCH)/kernel/ arch/$(ARCH)/crypto/ \
- arch/$(ARCH)/appldata/
+ arch/$(ARCH)/appldata/ arch/$(ARCH)/hypfs/
libs-y += arch/$(ARCH)/lib/
drivers-y += drivers/s390/
drivers-$(CONFIG_MATHEMU) += arch/$(ARCH)/math-emu/
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
new file mode 100644
index 00000000000..f4b00cd81f7
--- /dev/null
+++ b/arch/s390/hypfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux hypfs filesystem routines.
+#
+
+obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
+
+s390_hypfs-objs := inode.o hypfs_diag.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
new file mode 100644
index 00000000000..ea5567be00f
--- /dev/null
+++ b/arch/s390/hypfs/hypfs.h
@@ -0,0 +1,30 @@
+/*
+ * fs/hypfs/hypfs.h
+ * Hypervisor filesystem for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _HYPFS_H_
+#define _HYPFS_H_
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+#define REG_FILE_MODE 0440
+#define UPDATE_FILE_MODE 0220
+#define DIR_MODE 0550
+
+extern struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
+ const char *name);
+
+extern struct dentry *hypfs_create_u64(struct super_block *sb,
+ struct dentry *dir, const char *name,
+ __u64 value);
+
+extern struct dentry *hypfs_create_str(struct super_block *sb,
+ struct dentry *dir, const char *name,
+ char *string);
+
+#endif /* _HYPFS_H_ */
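
These three declarations are the whole directory-building interface that hypfs_diag.c uses. A hedged usage sketch follows (the directory and file names are invented for illustration; sb and root would come from the fill_super path, and error handling mirrors the real callers):

    /* Illustrative only: build <root>/example/{count,name} with the hypfs helpers. */
    #include <linux/err.h>
    #include "hypfs.h"

    static int example_create_files(struct super_block *sb, struct dentry *root)
    {
            static char name_buf[] = "demo";
            struct dentry *dir, *file;

            dir = hypfs_mkdir(sb, root, "example");
            if (IS_ERR(dir))
                    return PTR_ERR(dir);
            file = hypfs_create_u64(sb, dir, "count", 42ULL);
            if (IS_ERR(file))
                    return PTR_ERR(file);
            file = hypfs_create_str(sb, dir, "name", name_buf);
            if (IS_ERR(file))
                    return PTR_ERR(file);
            return 0;
    }
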
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
new file mode 100644
index 00000000000..efa74af7f04
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -0,0 +1,696 @@
+/*
+ * fs/hypfs/hypfs_diag.c
+ * Hypervisor filesystem for Linux on s390. Diag 204 and 224
+ * implementation.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <asm/ebcdic.h>
+#include "hypfs.h"
+
+#define LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */
+#define CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
+#define TMP_SIZE 64 /* size of temporary buffers */
+
+/* diag 204 subcodes */
+enum diag204_sc {
+ SUBC_STIB4 = 4,
+ SUBC_RSI = 5,
+ SUBC_STIB6 = 6,
+ SUBC_STIB7 = 7
+};
+
+/* The two available diag 204 data formats */
+enum diag204_format {
+ INFO_SIMPLE = 0,
+ INFO_EXT = 0x00010000
+};
+
+/* bit is set in flags, when physical cpu info is included in diag 204 data */
+#define LPAR_PHYS_FLG 0x80
+
+static char *diag224_cpu_names; /* diag 224 name table */
+static enum diag204_sc diag204_store_sc; /* used subcode for store */
+static enum diag204_format diag204_info_type; /* used diag 204 data format */
+
+static void *diag204_buf; /* 4K aligned buffer for diag204 data */
+static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
+static int diag204_buf_pages; /* number of pages for diag204 data */
+
+/*
+ * DIAG 204 data structures and member access functions.
+ *
+ * Since we have two different diag 204 data formats for old and new s390
+ * machines, we do not access the structs directly, but use getter functions for
+ * each struct member instead. This should make the code more readable.
+ */
+
+/* Time information block */
+
+struct info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod;
+} __attribute__ ((packed));
+
+struct x_info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod1;
+ __u64 curtod2;
+ char reserved[40];
+} __attribute__ ((packed));
+
+static inline int info_blk_hdr__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct info_blk_hdr);
+ else /* INFO_EXT */
+ return sizeof(struct x_info_blk_hdr);
+}
+
+static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct info_blk_hdr *)hdr)->npar;
+ else /* INFO_EXT */
+ return ((struct x_info_blk_hdr *)hdr)->npar;
+}
+
+static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct info_blk_hdr *)hdr)->flags;
+ else /* INFO_EXT */
+ return ((struct x_info_blk_hdr *)hdr)->flags;
+}
+
+static inline __u16 info_blk_hdr__pcpus(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct info_blk_hdr *)hdr)->phys_cpus;
+ else /* INFO_EXT */
+ return ((struct x_info_blk_hdr *)hdr)->phys_cpus;
+}
+
+/* Partition header */
+
+struct part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ char reserved[6];
+ char part_name[LPAR_NAME_LEN];
+} __attribute__ ((packed));
+
+struct x_part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ __u8 rcpus;
+ __u8 pflag;
+ __u32 mlu;
+ char part_name[LPAR_NAME_LEN];
+ char lpc_name[8];
+ char os_name[8];
+ __u64 online_cs;
+ __u64 online_es;
+ __u8 upid;
+ char reserved1[3];
+ __u32 group_mlu;
+ char group_name[8];
+ char reserved2[32];
+} __attribute__ ((packed));
+
+static inline int part_hdr__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct part_hdr);
+ else /* INFO_EXT */
+ return sizeof(struct x_part_hdr);
+}
+
+static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct part_hdr *)hdr)->cpus;
+ else /* INFO_EXT */
+ return ((struct x_part_hdr *)hdr)->rcpus;
+}
+
+static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
+ char *name)
+{
+ if (type == INFO_SIMPLE)
+ memcpy(name, ((struct part_hdr *)hdr)->part_name,
+ LPAR_NAME_LEN);
+ else /* INFO_EXT */
+ memcpy(name, ((struct x_part_hdr *)hdr)->part_name,
+ LPAR_NAME_LEN);
+ EBCASC(name, LPAR_NAME_LEN);
+ name[LPAR_NAME_LEN] = 0;
+ strstrip(name);
+}
+
+struct cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+} __attribute__ ((packed));
+
+struct x_cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+ __u16 min_weight;
+ __u16 cur_weight;
+ __u16 max_weight;
+ char reseved2[2];
+ __u64 online_time;
+ __u64 wait_time;
+ __u32 pma_weight;
+ __u32 polar_weight;
+ char reserved3[40];
+} __attribute__ ((packed));
+
+/* CPU info block */
+
+static inline int cpu_info__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct cpu_info);
+ else /* INFO_EXT */
+ return sizeof(struct x_cpu_info);
+}
+
+static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->ctidx;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->ctidx;
+}
+
+static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->cpu_addr;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->cpu_addr;
+}
+
+static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->acc_time;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->acc_time;
+}
+
+static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct cpu_info *)hdr)->lp_time;
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->lp_time;
+}
+
+static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return 0; /* online_time not available in simple info */
+ else /* INFO_EXT */
+ return ((struct x_cpu_info *)hdr)->online_time;
+}
+
+/* Physical header */
+
+struct phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+} __attribute__ ((packed));
+
+struct x_phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+ char reserved3[80];
+} __attribute__ ((packed));
+
+static inline int phys_hdr__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct phys_hdr);
+ else /* INFO_EXT */
+ return sizeof(struct x_phys_hdr);
+}
+
+static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_hdr *)hdr)->cpus;
+ else /* INFO_EXT */
+ return ((struct x_phys_hdr *)hdr)->cpus;
+}
+
+/* Physical CPU info block */
+
+struct phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[3];
+ __u64 mgm_time;
+ char reserved3[8];
+} __attribute__ ((packed));
+
+struct x_phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[3];
+ __u64 mgm_time;
+ char reserved3[80];
+} __attribute__ ((packed));
+
+static inline int phys_cpu__size(enum diag204_format type)
+{
+ if (type == INFO_SIMPLE)
+ return sizeof(struct phys_cpu);
+ else /* INFO_EXT */
+ return sizeof(struct x_phys_cpu);
+}
+
+static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_cpu *)hdr)->cpu_addr;
+ else /* INFO_EXT */
+ return ((struct x_phys_cpu *)hdr)->cpu_addr;
+}
+
+static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_cpu *)hdr)->mgm_time;
+ else /* INFO_EXT */
+ return ((struct x_phys_cpu *)hdr)->mgm_time;
+}
+
+static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
+{
+ if (type == INFO_SIMPLE)
+ return ((struct phys_cpu *)hdr)->ctidx;
+ else /* INFO_EXT */
+ return ((struct x_phys_cpu *)hdr)->ctidx;
+}
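
Taken together, the headers and getter functions above describe one flat buffer returned by diagnose 204. As the parsing loop in hypfs_diag_create_files() further down implies, the layout is roughly the following (a descriptive sketch, not a literal struct from this patch):

    /*
     * diag 204 result buffer (simple or extended variant of each record):
     *
     *   info_blk_hdr                - npar partitions, flags, physical cpu count
     *   repeated npar times:
     *     part_hdr                  - partition name, number of cpus (rcpus)
     *     cpu_info[rcpus]           - per-cpu accounting data
     *   if (flags & LPAR_PHYS_FLG):
     *     phys_hdr                  - number of physical cpus
     *     phys_cpu[cpus]            - per physical cpu management time
     */
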
+
+/* Diagnose 204 functions */
+
+static int diag204(unsigned long subcode, unsigned long size, void *addr)
+{
+ register unsigned long _subcode asm("0") = subcode;
+ register unsigned long _size asm("1") = size;
+
+ asm volatile (" diag %2,%0,0x204\n"
+ "0: \n" ".section __ex_table,\"a\"\n"
+#ifndef __s390x__
+ " .align 4\n"
+ " .long 0b,0b\n"
+#else
+ " .align 8\n"
+ " .quad 0b,0b\n"
+#endif
+ ".previous":"+d" (_subcode), "+d"(_size)
+ :"d"(addr)
+ :"memory");
+ if (_subcode)
+ return -1;
+ else
+ return _size;
+}
+
+/*
+ * For the old diag subcode 4 with simple data format we have to use real
+ * memory. If we use subcode 6 or 7 with extended data format, we can (and
+ * should) use vmalloc, since we need a lot of memory in that case. Currently
+ * up to 93 pages!
+ */
+
+static void diag204_free_buffer(void)
+{
+ if (!diag204_buf)
+ return;
+ if (diag204_buf_vmalloc) {
+ vfree(diag204_buf_vmalloc);
+ diag204_buf_vmalloc = NULL;
+ } else {
+ free_pages((unsigned long) diag204_buf, 0);
+ }
+ diag204_buf_pages = 0;
+ diag204_buf = NULL;
+}
+
+static void *diag204_alloc_vbuf(int pages)
+{
+ /* The buffer has to be page aligned! */
+ diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
+ if (!diag204_buf_vmalloc)
+ return ERR_PTR(-ENOMEM);
+ diag204_buf = (void*)((unsigned long)diag204_buf_vmalloc
+ & ~0xfffUL) + 0x1000;
+ diag204_buf_pages = pages;
+ return diag204_buf;
+}
+
+static void *diag204_alloc_rbuf(void)
+{
+ diag204_buf = (void *)__get_free_pages(GFP_KERNEL, 0);
+ if (!diag204_buf)
+ return ERR_PTR(-ENOMEM);
+ diag204_buf_pages = 1;
+ return diag204_buf;
+}
+
+static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
+{
+ if (diag204_buf) {
+ *pages = diag204_buf_pages;
+ return diag204_buf;
+ }
+ if (fmt == INFO_SIMPLE) {
+ *pages = 1;
+ return diag204_alloc_rbuf();
+ } else {/* INFO_EXT */
+ *pages = diag204(SUBC_RSI | INFO_EXT, 0, 0);
+ if (*pages <= 0)
+ return ERR_PTR(-ENOSYS);
+ else
+ return diag204_alloc_vbuf(*pages);
+ }
+}
+
+/*
+ * diag204_probe() has to find out which type of diagnose 204 implementation
+ * we have on our machine. Currently there are three possible scenarios:
+ * - subcode 4 + simple data format (only one page)
+ * - subcode 4-6 + extended data format
+ * - subcode 4-7 + extended data format
+ *
+ * Subcode 5 is used to retrieve the size of the data provided by subcodes
+ * 6 and 7. Subcode 7 basically has the same function as subcode 6. In addition
+ * to the data of subcode 6, it also provides information about secondary cpus.
+ * In order to get as much information as possible, we first try
+ * subcode 7, then 6 and if both fail, we use subcode 4.
+ */
+
+static int diag204_probe(void)
+{
+ void *buf;
+ int pages, rc;
+
+ buf = diag204_get_buffer(INFO_EXT, &pages);
+ if (!IS_ERR(buf)) {
+ if (diag204(SUBC_STIB7 | INFO_EXT, pages, buf) >= 0) {
+ diag204_store_sc = SUBC_STIB7;
+ diag204_info_type = INFO_EXT;
+ goto out;
+ }
+ if (diag204(SUBC_STIB6 | INFO_EXT, pages, buf) >= 0) {
+ diag204_store_sc = SUBC_STIB6;
+ diag204_info_type = INFO_EXT;
+ goto out;
+ }
+ diag204_free_buffer();
+ }
+
+ /* subcodes 6 and 7 failed, now try subcode 4 */
+
+ buf = diag204_get_buffer(INFO_SIMPLE, &pages);
+ if (IS_ERR(buf)) {
+ rc = PTR_ERR(buf);
+ goto fail_alloc;
+ }
+ if (diag204(SUBC_STIB4 | INFO_SIMPLE, pages, buf) >= 0) {
+ diag204_store_sc = SUBC_STIB4;
+ diag204_info_type = INFO_SIMPLE;
+ goto out;
+ } else {
+ rc = -ENOSYS;
+ goto fail_store;
+ }
+out:
+ rc = 0;
+fail_store:
+ diag204_free_buffer();
+fail_alloc:
+ return rc;
+}
+
+static void *diag204_store(void)
+{
+ void *buf;
+ int pages;
+
+ buf = diag204_get_buffer(diag204_info_type, &pages);
+ if (IS_ERR(buf))
+ goto out;
+ if (diag204(diag204_store_sc | diag204_info_type, pages, buf) < 0)
+ return ERR_PTR(-ENOSYS);
+out:
+ return buf;
+}
+
+/* Diagnose 224 functions */
+
+static void diag224(void *ptr)
+{
+ asm volatile(" diag %0,%1,0x224\n"
+ : :"d" (0), "d"(ptr) : "memory");
+}
+
+static int diag224_get_name_table(void)
+{
+ /* memory must be below 2GB */
+ diag224_cpu_names = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!diag224_cpu_names)
+ return -ENOMEM;
+ diag224(diag224_cpu_names);
+ EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
+ return 0;
+}
+
+static void diag224_delete_name_table(void)
+{
+ kfree(diag224_cpu_names);
+}
+
+static int diag224_idx2name(int index, char *name)
+{
+ memcpy(name, diag224_cpu_names + ((index + 1) * CPU_NAME_LEN),
+ CPU_NAME_LEN);
+ name[CPU_NAME_LEN] = 0;
+ strstrip(name);
+ return 0;
+}
+
+__init int hypfs_diag_init(void)
+{
+ int rc;
+
+ if (diag204_probe()) {
+ printk(KERN_ERR "hypfs: diag 204 not working.");
+ return -ENODATA;
+ }
+ rc = diag224_get_name_table();
+ if (rc) {
+ diag224_delete_name_table();
+ printk(KERN_ERR "hypfs: could not get name table.\n");
+ }
+ return rc;
+}
+
+__exit void hypfs_diag_exit(void)
+{
+ diag224_delete_name_table();
+ diag204_free_buffer();
+}
+
+/*
+ * Functions to create the directory structure
+ * *******************************************
+ */
+
+static int hypfs_create_cpu_files(struct super_block *sb,
+ struct dentry *cpus_dir, void *cpu_info)
+{
+ struct dentry *cpu_dir;
+ char buffer[TMP_SIZE];
+ void *rc;
+
+ snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
+ cpu_info));
+ cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
+ rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+ cpu_info__acc_time(diag204_info_type, cpu_info) -
+ cpu_info__lp_time(diag204_info_type, cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ rc = hypfs_create_u64(sb, cpu_dir, "cputime",
+ cpu_info__lp_time(diag204_info_type, cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ if (diag204_info_type == INFO_EXT) {
+ rc = hypfs_create_u64(sb, cpu_dir, "onlinetime",
+ cpu_info__online_time(diag204_info_type,
+ cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ }
+ diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
+ rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ return 0;
+}
+
+static void *hypfs_create_lpar_files(struct super_block *sb,
+ struct dentry *systems_dir, void *part_hdr)
+{
+ struct dentry *cpus_dir;
+ struct dentry *lpar_dir;
+ char lpar_name[LPAR_NAME_LEN + 1];
+ void *cpu_info;
+ int i;
+
+ part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
+ lpar_name[LPAR_NAME_LEN] = 0;
+ lpar_dir = hypfs_mkdir(sb, systems_dir, lpar_name);
+ if (IS_ERR(lpar_dir))
+ return lpar_dir;
+ cpus_dir = hypfs_mkdir(sb, lpar_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return cpus_dir;
+ cpu_info = part_hdr + part_hdr__size(diag204_info_type);
+ for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
+ int rc;
+ rc = hypfs_create_cpu_files(sb, cpus_dir, cpu_info);
+ if (rc)
+ return ERR_PTR(rc);
+ cpu_info += cpu_info__size(diag204_info_type);
+ }
+ return cpu_info;
+}
+
+static int hypfs_create_phys_cpu_files(struct super_block *sb,
+ struct dentry *cpus_dir, void *cpu_info)
+{
+ struct dentry *cpu_dir;
+ char buffer[TMP_SIZE];
+ void *rc;
+
+ snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
+ cpu_info));
+ cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
+ rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+ phys_cpu__mgm_time(diag204_info_type, cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
+ rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ return 0;
+}
+
+static void *hypfs_create_phys_files(struct super_block *sb,
+ struct dentry *parent_dir, void *phys_hdr)
+{
+ int i;
+ void *cpu_info;
+ struct dentry *cpus_dir;
+
+ cpus_dir = hypfs_mkdir(sb, parent_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return cpus_dir;
+ cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
+ for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
+ int rc;
+ rc = hypfs_create_phys_cpu_files(sb, cpus_dir, cpu_info);
+ if (rc)
+ return ERR_PTR(rc);
+ cpu_info += phys_cpu__size(diag204_info_type);
+ }
+ return cpu_info;
+}
+
+int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
+{
+ struct dentry *systems_dir, *hyp_dir;
+ void *time_hdr, *part_hdr;
+ int i, rc;
+ void *buffer, *ptr;
+
+ buffer = diag204_store();
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ systems_dir = hypfs_mkdir(sb, root, "systems");
+ if (IS_ERR(systems_dir)) {
+ rc = PTR_ERR(systems_dir);
+ goto err_out;
+ }
+ time_hdr = (struct x_info_blk_hdr *)buffer;
+ part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
+ for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
+ part_hdr = hypfs_create_lpar_files(sb, systems_dir, part_hdr);
+ if (IS_ERR(part_hdr)) {
+ rc = PTR_ERR(part_hdr);
+ goto err_out;
+ }
+ }
+ if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) {
+ ptr = hypfs_create_phys_files(sb, root, part_hdr);
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ goto err_out;
+ }
+ }
+ hyp_dir = hypfs_mkdir(sb, root, "hyp");
+ if (IS_ERR(hyp_dir)) {
+ rc = PTR_ERR(hyp_dir);
+ goto err_out;
+ }
+ ptr = hypfs_create_str(sb, hyp_dir, "type", "LPAR Hypervisor");
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ goto err_out;
+ }
+ rc = 0;
+
+err_out:
+ return rc;
+}
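
For orientation, the directory tree that hypfs_diag_create_files() builds under the filesystem root looks roughly as follows (the update file is added separately by inode.c, and onlinetime only appears with the extended data format):

    /*
     *   systems/
     *     <lpar name>/
     *       cpus/
     *         <cpu address>/
     *           mgmtime  cputime  onlinetime  type
     *   cpus/                    (physical cpus, only if LPAR_PHYS_FLG is set)
     *     <cpu address>/
     *       mgmtime  type
     *   hyp/
     *     type                   ("LPAR Hypervisor")
     *   update                   (created by inode.c, not by this file)
     */
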
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
new file mode 100644
index 00000000000..793dea6b9bb
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag.h
@@ -0,0 +1,16 @@
+/*
+ * fs/hypfs/hypfs_diag.h
+ * Hypervisor filesystem for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _HYPFS_DIAG_H_
+#define _HYPFS_DIAG_H_
+
+extern int hypfs_diag_init(void);
+extern void hypfs_diag_exit(void);
+extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
+
+#endif /* _HYPFS_DIAG_H_ */
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
new file mode 100644
index 00000000000..bab560a1357
--- /dev/null
+++ b/arch/s390/hypfs/inode.c
@@ -0,0 +1,492 @@
+/*
+ * fs/hypfs/inode.c
+ * Hypervisor filesystem for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/vfs.h>
+#include <linux/pagemap.h>
+#include <linux/gfp.h>
+#include <linux/time.h>
+#include <linux/parser.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <asm/ebcdic.h>
+#include "hypfs.h"
+#include "hypfs_diag.h"
+
+#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
+#define TMP_SIZE 64 /* size of temporary buffers */
+
+static struct dentry *hypfs_create_update_file(struct super_block *sb,
+ struct dentry *dir);
+
+struct hypfs_sb_info {
+ uid_t uid; /* uid used for files and dirs */
+ gid_t gid; /* gid used for files and dirs */
+ struct dentry *update_file; /* file to trigger update */
+ time_t last_update; /* last update time in secs since 1970 */
+ struct mutex lock; /* lock to protect update process */
+};
+
+static struct file_operations hypfs_file_ops;
+static struct file_system_type hypfs_type;
+static struct super_operations hypfs_s_ops;
+
+/* start of list of all dentries, which have to be deleted on update */
+static struct dentry *hypfs_last_dentry;
+
+static void hypfs_update_update(struct super_block *sb)
+{
+ struct hypfs_sb_info *sb_info = sb->s_fs_info;
+ struct inode *inode = sb_info->update_file->d_inode;
+
+ sb_info->last_update = get_seconds();
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+}
+
+/* directory tree removal functions */
+
+static void hypfs_add_dentry(struct dentry *dentry)
+{
+ dentry->d_fsdata = hypfs_last_dentry;
+ hypfs_last_dentry = dentry;
+}
+
+static void hypfs_remove(struct dentry *dentry)
+{
+ struct dentry *parent;
+
+ parent = dentry->d_parent;
+ if (S_ISDIR(dentry->d_inode->i_mode))
+ simple_rmdir(parent->d_inode, dentry);
+ else
+ simple_unlink(parent->d_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+}
+
+static void hypfs_delete_tree(struct dentry *root)
+{
+ while (hypfs_last_dentry) {
+ struct dentry *next_dentry;
+ next_dentry = hypfs_last_dentry->d_fsdata;
+ hypfs_remove(hypfs_last_dentry);
+ hypfs_last_dentry = next_dentry;
+ }
+}
+
+static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret = new_inode(sb);
+
+ if (ret) {
+ struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
+ ret->i_mode = mode;
+ ret->i_uid = hypfs_info->uid;
+ ret->i_gid = hypfs_info->gid;
+ ret->i_blksize = PAGE_CACHE_SIZE;
+ ret->i_blocks = 0;
+ ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
+ if (mode & S_IFDIR)
+ ret->i_nlink = 2;
+ else
+ ret->i_nlink = 1;
+ }
+ return ret;
+}
+
+static void hypfs_drop_inode(struct inode *inode)
+{
+ kfree(inode->u.generic_ip);
+ generic_delete_inode(inode);
+}
+
+static int hypfs_open(struct inode *inode, struct file *filp)
+{
+ char *data = filp->f_dentry->d_inode->u.generic_ip;
+ struct hypfs_sb_info *fs_info;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ if (!(inode->i_mode & S_IWUGO))
+ return -EACCES;
+ }
+ if (filp->f_mode & FMODE_READ) {
+ if (!(inode->i_mode & S_IRUGO))
+ return -EACCES;
+ }
+
+ fs_info = inode->i_sb->s_fs_info;
+ if (data) {
+ mutex_lock(&fs_info->lock);
+ filp->private_data = kstrdup(data, GFP_KERNEL);
+ if (!filp->private_data) {
+ mutex_unlock(&fs_info->lock);
+ return -ENOMEM;
+ }
+ mutex_unlock(&fs_info->lock);
+ }
+ return 0;
+}
+
+static ssize_t hypfs_aio_read(struct kiocb *iocb, __user char *buf,
+ size_t count, loff_t offset)
+{
+ char *data;
+ size_t len;
+ struct file *filp = iocb->ki_filp;
+
+ data = filp->private_data;
+ len = strlen(data);
+ if (offset > len) {
+ count = 0;
+ goto out;
+ }
+ if (count > len - offset)
+ count = len - offset;
+ if (copy_to_user(buf, data + offset, count)) {
+ count = -EFAULT;
+ goto out;
+ }
+ iocb->ki_pos += count;
+ file_accessed(filp);
+out:
+ return count;
+}
+static ssize_t hypfs_aio_write(struct kiocb *iocb, const char __user *buf,
+ size_t count, loff_t pos)
+{
+ int rc;
+ struct super_block *sb;
+ struct hypfs_sb_info *fs_info;
+
+ sb = iocb->ki_filp->f_dentry->d_inode->i_sb;
+ fs_info = sb->s_fs_info;
+ /*
+ * Currently we only allow one update per second for two reasons:
+ * 1. diag 204 is VERY expensive
+ * 2. If several processes do updates in parallel and then read the
+ * hypfs data, the likelihood of collisions is reduced if we restrict
+ * the minimum update interval. A collision occurs if, during the
+ * data gathering of one process, another process triggers an update.
+ * If the first process wants to ensure consistent data, it has
+ * to restart data collection in this case.
+ */
+ mutex_lock(&fs_info->lock);
+ if (fs_info->last_update == get_seconds()) {
+ rc = -EBUSY;
+ goto out;
+ }
+ hypfs_delete_tree(sb->s_root);
+ rc = hypfs_diag_create_files(sb, sb->s_root);
+ if (rc) {
+ printk(KERN_ERR "hypfs: Update failed\n");
+ hypfs_delete_tree(sb->s_root);
+ goto out;
+ }
+ hypfs_update_update(sb);
+ rc = count;
+out:
+ mutex_unlock(&fs_info->lock);
+ return rc;
+}
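
Since the tree is only rebuilt when the update file is written, a userspace consumer has to poke it before reading. A hedged userspace sketch follows (the path /hypfs/update is only an example mount point; the data written is ignored, and EBUSY simply means the tree was refreshed less than a second ago):

    /* Userspace sketch: request a hypfs data refresh before reading the files. */
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    static int refresh_hypfs(const char *update_path)
    {
            int fd = open(update_path, O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, "1", 1) < 0 && errno != EBUSY) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;       /* on EBUSY the data is at most one second old anyway */
    }
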
+
+static int hypfs_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+enum { opt_uid, opt_gid, opt_err };
+
+static match_table_t hypfs_tokens = {
+ {opt_uid, "uid=%u"},
+ {opt_gid, "gid=%u"},
+ {opt_err, NULL}
+};
+
+static int hypfs_parse_options(char *options, struct super_block *sb)
+{
+ char *str;
+ substring_t args[MAX_OPT_ARGS];
+
+ if (!options)
+ return 0;
+ while ((str = strsep(&options, ",")) != NULL) {
+ int token, option;
+ struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
+
+ if (!*str)
+ continue;
+ token = match_token(str, hypfs_tokens, args);
+ switch (token) {
+ case opt_uid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ hypfs_info->uid = option;
+ break;
+ case opt_gid:
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ hypfs_info->gid = option;
+ break;
+ case opt_err:
+ default:
+ printk(KERN_ERR "hypfs: Unrecognized mount option "
+ "\"%s\" or missing value\n", str);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
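
The only mount options are uid= and gid=, which default to the mounting process as set up in hypfs_fill_super() below. A hedged illustration of passing them from userspace (mount point and ids are invented for the example):

    /* Userspace sketch: mount hypfs so that user 500 / group 100 own the files. */
    #include <sys/mount.h>

    static int mount_hypfs_example(void)
    {
            return mount("none", "/hypfs", "s390_hypfs", 0, "uid=500,gid=100");
    }
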
+
+static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root_inode;
+ struct dentry *root_dentry;
+ int rc = 0;
+ struct hypfs_sb_info *sbi;
+
+ sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+ mutex_init(&sbi->lock);
+ sbi->uid = current->uid;
+ sbi->gid = current->gid;
+ sb->s_fs_info = sbi;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = HYPFS_MAGIC;
+ sb->s_op = &hypfs_s_ops;
+ if (hypfs_parse_options(data, sb)) {
+ rc = -EINVAL;
+ goto err_alloc;
+ }
+ root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
+ if (!root_inode) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+ root_inode->i_op = &simple_dir_inode_operations;
+ root_inode->i_fop = &simple_dir_operations;
+ root_dentry = d_alloc_root(root_inode);
+ if (!root_dentry) {
+ iput(root_inode);
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+ rc = hypfs_diag_create_files(sb, root_dentry);
+ if (rc)
+ goto err_tree;
+ sbi->update_file = hypfs_create_update_file(sb, root_dentry);
+ if (IS_ERR(sbi->update_file)) {
+ rc = PTR_ERR(sbi->update_file);
+ goto err_tree;
+ }
+ hypfs_update_update(sb);
+ sb->s_root = root_dentry;
+ return 0;
+
+err_tree:
+ hypfs_delete_tree(root_dentry);
+ d_genocide(root_dentry);
+ dput(root_dentry);
+err_alloc:
+ kfree(sbi);
+ return rc;
+}
+
+static struct super_block *hypfs_get_super(struct file_system_type *fst,
+ int flags, const char *devname,
+ void *data)
+{
+ return get_sb_single(fst, flags, data, hypfs_fill_super);
+}
+
+static void hypfs_kill_super(struct super_block *sb)
+{
+ struct hypfs_sb_info *sb_info = sb->s_fs_info;
+
+ hypfs_delete_tree(sb->s_root);
+ hypfs_remove(sb_info->update_file);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+ kill_litter_super(sb);
+}
+
+static struct dentry *hypfs_create_file(struct super_block *sb,
+ struct dentry *parent, const char *name,
+ char *data, mode_t mode)
+{
+ struct dentry *dentry;
+ struct inode *inode;
+ struct qstr qname;
+
+ qname.name = name;
+ qname.len = strlen(name);
+ qname.hash = full_name_hash(name, qname.len);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ return ERR_PTR(-ENOMEM);
+ inode = hypfs_make_inode(sb, mode);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
+ if (mode & S_IFREG) {
+ inode->i_fop = &hypfs_file_ops;
+ if (data)
+ inode->i_size = strlen(data);
+ else
+ inode->i_size = 0;
+ } else if (mode & S_IFDIR) {
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ parent->d_inode->i_nlink++;
+ } else
+ BUG();
+ inode->u.generic_ip = data;
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ return dentry;
+}
+
+struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = hypfs_create_file(sb, parent, name, NULL, S_IFDIR | DIR_MODE);
+ if (IS_ERR(dentry))
+ return dentry;
+ hypfs_add_dentry(dentry);
+ parent->d_inode->i_nlink++;
+ return dentry;
+}
+
+static struct dentry *hypfs_create_update_file(struct super_block *sb,
+ struct dentry *dir)
+{
+ struct dentry *dentry;
+
+ dentry = hypfs_create_file(sb, dir, "update", NULL,
+ S_IFREG | UPDATE_FILE_MODE);
+ /*
+ * We do not put the update file on the 'delete' list with
+ * hypfs_add_dentry(), since it should not be removed when the tree
+ * is updated.
+ */
+ return dentry;
+}
+
+struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
+ const char *name, __u64 value)
+{
+ char *buffer;
+ char tmp[TMP_SIZE];
+ struct dentry *dentry;
+
+ snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value);
+ buffer = kstrdup(tmp, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+ dentry =
+ hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+ if (IS_ERR(dentry)) {
+ kfree(buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ hypfs_add_dentry(dentry);
+ return dentry;
+}
+
+struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
+ const char *name, char *string)
+{
+ char *buffer;
+ struct dentry *dentry;
+
+ buffer = kmalloc(strlen(string) + 2, GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+ sprintf(buffer, "%s\n", string);
+ dentry =
+ hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+ if (IS_ERR(dentry)) {
+ kfree(buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ hypfs_add_dentry(dentry);
+ return dentry;
+}
+
+static struct file_operations hypfs_file_ops = {
+ .open = hypfs_open,
+ .release = hypfs_release,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = hypfs_aio_read,
+ .aio_write = hypfs_aio_write,
+};
+
+static struct file_system_type hypfs_type = {
+ .owner = THIS_MODULE,
+ .name = "s390_hypfs",
+ .get_sb = hypfs_get_super,
+ .kill_sb = hypfs_kill_super
+};
+
+static struct super_operations hypfs_s_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = hypfs_drop_inode,
+};
+
+static decl_subsys(s390, NULL, NULL);
+
+static int __init hypfs_init(void)
+{
+ int rc;
+
+ if (MACHINE_IS_VM)
+ return -ENODATA;
+ if (hypfs_diag_init()) {
+ rc = -ENODATA;
+ goto fail_diag;
+ }
+ kset_set_kset_s(&s390_subsys, hypervisor_subsys);
+ rc = subsystem_register(&s390_subsys);
+ if (rc)
+ goto fail_sysfs;
+ rc = register_filesystem(&hypfs_type);
+ if (rc)
+ goto fail_filesystem;
+ return 0;
+
+fail_filesystem:
+ subsystem_unregister(&s390_subsys);
+fail_sysfs:
+ hypfs_diag_exit();
+fail_diag:
+ printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
+ return rc;
+}
+
+static void __exit hypfs_exit(void)
+{
+ hypfs_diag_exit();
+ unregister_filesystem(&hypfs_type);
+ subsystem_unregister(&s390_subsys);
+}
+
+module_init(hypfs_init)
+module_exit(hypfs_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
+MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c
index 3ea8929e483..9e2ffc45c0e 100644
--- a/arch/sh64/kernel/signal.c
+++ b/arch/sh64/kernel/signal.c
@@ -407,7 +407,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8ul);
diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
index 4885ca6cbc7..0f0eb6aa1c4 100644
--- a/arch/sparc64/solaris/fs.c
+++ b/arch/sparc64/solaris/fs.c
@@ -356,7 +356,7 @@ static int report_statvfs(struct vfsmount *mnt, struct inode *inode, u32 buf)
int error;
struct sol_statvfs __user *ss = A(buf);
- error = vfs_statfs(mnt->mnt_sb, &s);
+ error = vfs_statfs(mnt->mnt_root, &s);
if (!error) {
const char *p = mnt->mnt_sb->s_type->name;
int i = 0;
@@ -392,7 +392,7 @@ static int report_statvfs64(struct vfsmount *mnt, struct inode *inode, u32 buf)
int error;
struct sol_statvfs64 __user *ss = A(buf);
- error = vfs_statfs(mnt->mnt_sb, &s);
+ error = vfs_statfs(mnt->mnt_root, &s);
if (!error) {
const char *p = mnt->mnt_sb->s_type->name;
int i = 0;
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 5681a8bd370..bab51d61917 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -49,7 +49,6 @@ config GCOV
config SYSCALL_DEBUG
bool "Enable system call debugging"
- default N
depends on DEBUG_INFO
help
This adds some system debugging to UML, including keeping a ring buffer
diff --git a/arch/v850/kernel/signal.c b/arch/v850/kernel/signal.c
index 633e4e1b825..17c2d4359b0 100644
--- a/arch/v850/kernel/signal.c
+++ b/arch/v850/kernel/signal.c
@@ -274,7 +274,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
/* Default to using normal stack */
unsigned long sp = regs->gpr[GPR_SP];
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void *)((sp - frame_size) & -8UL);
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 7d3bc5ac5db..af44130f0d6 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
+ select PCI
select ACPI_NUMA
default y
help
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 5a92fed2d1d..4ec594ab1a9 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -696,4 +696,5 @@ ia32_sys_call_table:
.quad sys_sync_file_range
.quad sys_tee
.quad compat_sys_vmsplice
+ .quad compat_sys_move_pages
ia32_syscall_end:
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 4fe97071f29..080b9963f1b 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
+processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
endif
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
deleted file mode 100644
index 3bdc2baa5bb..00000000000
--- a/arch/x86_64/kernel/acpi/processor.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * arch/x86_64/kernel/acpi/processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return;
- }
-
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
- pr->pdc = obj_list;
-
- return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
- unsigned int cpu = pr->id;
- struct cpuinfo_x86 *c = cpu_data + cpu;
-
- pr->pdc = NULL;
- if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
- init_intel_pdc(pr, c);
-
- return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
diff --git a/arch/x86_64/kernel/acpi/sleep.c b/arch/x86_64/kernel/acpi/sleep.c
index 867a0ebee17..091bc79c888 100644
--- a/arch/x86_64/kernel/acpi/sleep.c
+++ b/arch/x86_64/kernel/acpi/sleep.c
@@ -35,6 +35,8 @@
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
+#include <linux/cpumask.h>
+
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -66,7 +68,8 @@ static void init_low_mapping(void)
pgd_t *slot0 = pgd_offset(current->mm, 0UL);
low_ptr = *slot0;
set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
- flush_tlb_all();
+ WARN_ON(num_online_cpus() != 1);
+ local_flush_tlb();
}
/**
@@ -92,7 +95,7 @@ int acpi_save_state_mem(void)
void acpi_restore_state_mem(void)
{
set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
- flush_tlb_all();
+ local_flush_tlb();
}
/**
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 100a30c4004..29ef99001e0 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -51,7 +51,7 @@ int disable_apic_timer __initdata;
static cpumask_t timer_interrupt_broadcast_ipi_mask;
/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer = 0;
+int using_apic_timer __read_mostly = 0;
static void apic_pm_activate(void);
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index a5d7e16b928..44ddb1ec808 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -24,7 +24,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
-unsigned int mxcsr_feature_mask = 0xffffffff;
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffff;
void mxcsr_feature_mask_init(void)
{
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 655b9192eeb..b8d5116d737 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -47,6 +47,7 @@
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
+#include <linux/suspend.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -595,6 +596,100 @@ static void discover_ebda(void)
ebda_size = 64*1024;
}
+#ifdef CONFIG_SOFTWARE_SUSPEND
+static void __init mark_nosave_page_range(unsigned long start, unsigned long end)
+{
+ struct page *page;
+ while (start <= end) {
+ page = pfn_to_page(start);
+ SetPageNosave(page);
+ start++;
+ }
+}
+
+static void __init e820_nosave_reserved_pages(void)
+{
+ int i;
+ unsigned long r_start = 0, r_end = 0;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = round_down(ei->addr, PAGE_SIZE);
+ end = round_up(ei->addr + ei->size, PAGE_SIZE);
+ if (start >= end)
+ continue;
+ if (ei->type == E820_RESERVED)
+ continue;
+ r_end = start>>PAGE_SHIFT;
+ /* swsusp ignores invalid pfn, ignore these pages here */
+ if (r_end > end_pfn)
+ r_end = end_pfn;
+ if (r_end > r_start)
+ mark_nosave_page_range(r_start, r_end-1);
+ if (r_end >= end_pfn)
+ break;
+ r_start = end>>PAGE_SHIFT;
+ }
+}
+
+static void __init e820_save_acpi_pages(void)
+{
+ int i;
+
+ /* Assume e820 map is sorted */
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start, end;
+
+ start = ei->addr;
+ end = ei->addr + ei->size;
+ if (start >= end)
+ continue;
+ if (ei->type != E820_ACPI && ei->type != E820_NVS)
+ continue;
+ /*
+ * If the region is below end_pfn, it will be
+ * saved/restored by swsusp following the 'RAM' type.
+ */
+ if (start < (end_pfn << PAGE_SHIFT))
+ start = end_pfn << PAGE_SHIFT;
+ if (end > start)
+ swsusp_add_arch_pages(start, end);
+ }
+}
+
+extern char __start_rodata, __end_rodata;
+/*
+ * BIOS reserved region/hole - no save/restore
+ * ACPI NVS - save/restore
+ * ACPI Data - this is a little tricky: the memory could be reused by the OS
+ * after it has read the tables from the region, but saving/restoring it has
+ * no side effect, and Linux runtime module load/unload might use it.
+ * kernel rodata - no save/restore (kernel rodata isn't changed)
+ */
+static int __init mark_nosave_pages(void)
+{
+ unsigned long pfn_start, pfn_end;
+
+ /* BIOS reserved regions & holes */
+ e820_nosave_reserved_pages();
+
+ /* kernel rodata */
+ pfn_start = round_up(__pa_symbol(&__start_rodata), PAGE_SIZE) >> PAGE_SHIFT;
+ pfn_end = round_down(__pa_symbol(&__end_rodata), PAGE_SIZE) >> PAGE_SHIFT;
+ mark_nosave_page_range(pfn_start, pfn_end-1);
+
+ /* record ACPI Data/NVS as saveable */
+ e820_save_acpi_pages();
+
+ return 0;
+}
+core_initcall(mark_nosave_pages);
+#endif
+
void __init setup_arch(char **cmdline_p)
{
unsigned long kernel_end;
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 474df22c6ed..502fce65e96 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -30,7 +30,6 @@
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
-static nodemask_t nodes_found __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
@@ -38,33 +37,14 @@ int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0 /* Ignore all settings */
#endif
-static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)
-static int node_to_pxm(int n);
-
-int pxm_to_node(int pxm)
-{
- if ((unsigned)pxm >= 256)
- return -1;
- /* Extend 0xff to (int)-1 */
- return (signed char)pxm2node[pxm];
-}
-
static __init int setup_node(int pxm)
{
- unsigned node = pxm2node[pxm];
- if (node == 0xff) {
- if (nodes_weight(nodes_found) >= MAX_NUMNODES)
- return -1;
- node = first_unset_node(nodes_found);
- node_set(node, nodes_found);
- pxm2node[pxm] = node;
- }
- return pxm2node[pxm];
+ return acpi_map_pxm_to_node(pxm);
}
static __init int conflicting_nodes(unsigned long start, unsigned long end)
@@ -440,17 +420,6 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
return 0;
}
-static int node_to_pxm(int n)
-{
- int i;
- if (pxm2node[n] == n)
- return n;
- for (i = 0; i < 256; i++)
- if (pxm2node[i] == n)
- return i;
- return 0;
-}
-
void __init srat_reserve_add_area(int nodeid)
{
if (found_add_area && nodes_add[nodeid].end) {
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index dbeb3504c3c..848f173db25 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HARDIRQS
bool
default y
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
source "init/Kconfig"
menu "Processor type and features"
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5c018c503df..89e409e9e0d 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1102,7 +1102,7 @@ ENTRY(fast_syscall_sysxtensa)
s32i a7, a2, PT_AREG7
movi a7, 4 # sizeof(unsigned int)
- verify_area a3, a7, a0, a2, .Leac
+ access_ok a0, a3, a7, a2, .Leac
_beqi a6, SYSXTENSA_ATOMIC_SET, .Lset
_beqi a6, SYSXTENSA_ATOMIC_EXG_ADD, .Lexg
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index e252b61e45a..c494f0826fc 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -104,7 +104,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
if (act) {
old_sigset_t mask;
- if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
@@ -116,7 +116,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
- if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
@@ -236,7 +236,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
err |= __get_user(buf, &sc->sc_cpstate);
if (buf) {
- if (verify_area(VERIFY_READ, buf, sizeof(*buf)))
+ if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= restore_cpextra(buf);
}
@@ -357,7 +357,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (regs->depc > 64)
panic ("Double exception sys_sigreturn\n");
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
@@ -394,7 +394,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
return 0;
}
- if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -433,7 +433,7 @@ badframe:
static inline void *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void *)((sp - frame_size) & -16ul);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b..465b54312c5 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3359,12 +3359,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
*/
static void blk_done_softirq(struct softirq_action *h)
{
- struct list_head *cpu_list;
- LIST_HEAD(local_list);
+ struct list_head *cpu_list, local_list;
local_irq_disable();
cpu_list = &__get_cpu_var(blk_cpu_done);
- list_splice_init(cpu_list, &local_list);
+ list_replace_init(cpu_list, &local_list);
local_irq_enable();
while (!list_empty(&local_list)) {
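The softirq handler no longer needs a pre-built LIST_HEAD because list_replace_init() transplants every entry of the per-CPU list onto the local head in O(1) and reinitializes the old head in the same step. A self-contained sketch of that helper, assuming the circular doubly-linked layout used by list.h:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* The new head takes over the old head's entries; the old head is left empty. */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
	INIT_LIST_HEAD(old);
}

int main(void)
{
	struct list_head cpu_list, entry, local_list;

	INIT_LIST_HEAD(&cpu_list);
	/* link a single entry into cpu_list by hand */
	entry.next = &cpu_list; entry.prev = &cpu_list;
	cpu_list.next = &entry; cpu_list.prev = &entry;

	list_replace_init(&cpu_list, &local_list);

	printf("cpu_list empty: %d, local_list has entry: %d\n",
	       cpu_list.next == &cpu_list, local_list.next == &entry);
	return 0;
}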
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c24652d31bf..94b8d820c51 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -10,9 +10,8 @@ menu "ACPI (Advanced Configuration and Power Interface) Support"
config ACPI
bool "ACPI Support"
depends on IA64 || X86
+ depends on PCI
select PM
- select PCI
-
default y
---help---
Advanced Configuration and Power Interface (ACPI) support for
@@ -162,7 +161,7 @@ config ACPI_THERMAL
config ACPI_NUMA
bool "NUMA support"
depends on NUMA
- depends on (IA64 || X86_64)
+ depends on (X86 || IA64)
default y if IA64_GENERIC || IA64_SGI_SN2
config ACPI_ASUS
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index d882bf87fa9..e0a95ba7237 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -74,7 +74,7 @@ struct acpi_memory_device {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
- u64 end_addr; /* Memory Range end physical addr */
+ u64 length; /* Memory Range length */
};
static int
@@ -97,12 +97,11 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
- mem_device->caching =
- address64.info.mem.caching;
+ mem_device->caching = address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
- mem_device->end_addr = address64.maximum;
+ mem_device->length = address64.address_length;
}
}
@@ -199,8 +198,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
*/
- result = add_memory(mem_device->start_addr,
- (mem_device->end_addr - mem_device->start_addr) + 1);
+ result = add_memory(mem_device->start_addr, mem_device->length);
if (result) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
mem_device->state = MEMORY_INVALID_STATE;
@@ -249,7 +247,7 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
int result;
u64 start = mem_device->start_addr;
- u64 len = mem_device->end_addr - start + 1;
+ u64 len = mem_device->length;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
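Recording address_length directly removes the inclusive-end arithmetic that the old code repeated at each call site. A short illustration of the equivalence, using made-up numbers for the hot-added range:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start  = 0x100000000ULL;	/* hypothetical hot-added range */
	uint64_t length = 0x10000000ULL;	/* 256 MB, as reported in address_length */
	uint64_t end    = start + length - 1;	/* inclusive end, the old representation */

	/* Old call-site arithmetic vs. new direct use of the length. */
	printf("old: add_memory(start, %#llx)\n",
	       (unsigned long long)((end - start) + 1));
	printf("new: add_memory(start, %#llx)\n",
	       (unsigned long long)length);
	return 0;
}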
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index f4c87750dbf..839f423d738 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -817,7 +817,7 @@ typedef int (proc_writefunc) (struct file * file, const char __user * buffer,
unsigned long count, void *data);
static int
-__init asus_proc_add(char *name, proc_writefunc * writefunc,
+asus_proc_add(char *name, proc_writefunc * writefunc,
proc_readfunc * readfunc, mode_t mode,
struct acpi_device *device)
{
@@ -836,7 +836,7 @@ __init asus_proc_add(char *name, proc_writefunc * writefunc,
return 0;
}
-static int __init asus_hotk_add_fs(struct acpi_device *device)
+static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
mode_t mode;
@@ -954,7 +954,7 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
* This function is used to initialize the hotk with right values. In this
* method, we can make all the detection we want, and modify the hotk struct
*/
-static int __init asus_hotk_get_info(void)
+static int asus_hotk_get_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -970,7 +970,7 @@ static int __init asus_hotk_get_info(void)
* HID), this bit will be moved. A global variable asus_info contains
* the DSDT header.
*/
- status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
+ status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
printk(KERN_WARNING " Couldn't get the DSDT table header\n");
else
@@ -1101,7 +1101,7 @@ static int __init asus_hotk_get_info(void)
return AE_OK;
}
-static int __init asus_hotk_check(void)
+static int asus_hotk_check(void)
{
int result = 0;
@@ -1119,7 +1119,9 @@ static int __init asus_hotk_check(void)
return result;
}
-static int __init asus_hotk_add(struct acpi_device *device)
+static int asus_hotk_found;
+
+static int asus_hotk_add(struct acpi_device *device)
{
acpi_status status = AE_OK;
int result;
@@ -1180,6 +1182,8 @@ static int __init asus_hotk_add(struct acpi_device *device)
}
}
+ asus_hotk_found = 1;
+
end:
if (result) {
kfree(hotk);
@@ -1226,12 +1230,24 @@ static int __init asus_acpi_init(void)
asus_proc_dir->owner = THIS_MODULE;
result = acpi_bus_register_driver(&asus_hotk_driver);
- if (result < 1) {
- acpi_bus_unregister_driver(&asus_hotk_driver);
+ if (result < 0) {
remove_proc_entry(PROC_ASUS, acpi_root_dir);
return -ENODEV;
}
+ /*
+ * This is a bit of a kludge. We only want this module loaded
+ * for ASUS systems, but there's currently no way to probe the
+ * ACPI namespace for ASUS HIDs. So we just return failure if
+ * we didn't find one, which will cause the module to be
+ * unloaded.
+ */
+ if (!asus_hotk_found) {
+ acpi_bus_unregister_driver(&asus_hotk_driver);
+ remove_proc_entry(PROC_ASUS, acpi_root_dir);
+ return result;
+ }
+
return 0;
}
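The init path now separates "driver registration failed" (result < 0) from "registered, but the .add callback never saw an ASUS device", which the new asus_hotk_found flag records. A compact sketch of that register-then-check pattern; the helper names are hypothetical stand-ins, not the ACPI core API:

#include <stdio.h>

static int hotk_found;			/* set by the .add callback when hardware matches */

static int fake_add_callback(int hw_present)
{
	if (!hw_present)
		return -1;		/* .add rejects the device */
	hotk_found = 1;
	return 0;
}

/* Stand-in for acpi_bus_register_driver(): negative only on registration errors. */
static int fake_register_driver(int hw_present)
{
	(void)fake_add_callback(hw_present);
	return 0;
}

static int module_init_path(int hw_present)
{
	int result;

	hotk_found = 0;
	result = fake_register_driver(hw_present);
	if (result < 0)			/* hard failure: could not register at all */
		return result;

	if (!hotk_found)		/* registered fine, but no matching hardware */
		return -19;		/* unregister + remove proc entries, then -ENODEV */

	return 0;
}

int main(void)
{
	int with_hw = module_init_path(1);
	int without_hw = module_init_path(0);

	printf("with hardware: %d, without: %d\n", with_hw, without_hw);
	return 0;
}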
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 606f8733a77..dd3983cece9 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -43,7 +43,7 @@ ACPI_MODULE_NAME("acpi_bus")
extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
#endif
-FADT_DESCRIPTOR acpi_fadt;
+struct fadt_descriptor acpi_fadt;
EXPORT_SYMBOL(acpi_fadt);
struct acpi_device *acpi_root;
@@ -205,12 +205,14 @@ int acpi_bus_set_power(acpi_handle handle, int state)
* Get device's current power state if it's unknown
* This means device power state isn't initialized or previous setting failed
*/
- if (device->power.state == ACPI_STATE_UNKNOWN)
- acpi_bus_get_power(device->handle, &device->power.state);
- if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
- state));
- return_VALUE(0);
+ if (!device->flags.force_power_state) {
+ if (device->power.state == ACPI_STATE_UNKNOWN)
+ acpi_bus_get_power(device->handle, &device->power.state);
+ if (state == device->power.state) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
+ state));
+ return_VALUE(0);
+ }
}
if (!device->power.states[state].flags.valid) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Device does not support D%d\n",
@@ -596,6 +598,8 @@ void __init acpi_early_init(void)
if (acpi_disabled)
return_VOID;
+ printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
+
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
@@ -617,7 +621,7 @@ void __init acpi_early_init(void)
/*
* Get a separate copy of the FADT for use by other drivers.
*/
- status = acpi_get_table(ACPI_TABLE_FADT, 1, &buffer);
+ status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get the FADT\n");
goto error0;
@@ -743,8 +747,6 @@ static int __init acpi_init(void)
ACPI_FUNCTION_TRACE("acpi_init");
- printk(KERN_INFO PREFIX "Subsystem revision %08x\n", ACPI_CA_VERSION);
-
if (acpi_disabled) {
printk(KERN_INFO PREFIX "Interpreter disabled.\n");
return_VALUE(-ENODEV);
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index 76bc0463f6d..a6d77efb41a 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -87,7 +87,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
union acpi_operand_object *second_desc = NULL;
u32 flags;
- ACPI_FUNCTION_TRACE("ds_create_buffer_field");
+ ACPI_FUNCTION_TRACE(ds_create_buffer_field);
/* Get the name_string argument */
@@ -210,7 +210,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
acpi_status status;
acpi_integer position;
- ACPI_FUNCTION_TRACE_PTR("ds_get_field_names", info);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
/* First field starts at bit zero */
@@ -342,7 +342,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_field", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@@ -399,7 +399,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_namespace_node *node;
u8 type = 0;
- ACPI_FUNCTION_TRACE_PTR("ds_init_field_objects", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
switch (walk_state->opcode) {
case AML_FIELD_OP:
@@ -425,6 +425,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
* Walk the list of entries in the field_list
*/
while (arg) {
+
/* Ignore OFFSET and ACCESSAS terms here */
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
@@ -481,7 +482,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_bank_field", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@@ -554,7 +555,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_index_field", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
/* First arg is the name of the Index register (must already exist) */
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index e65a07ad242..bbdf990e9f6 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -184,7 +184,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*
* RETURN: Status
*
- * DESCRIPTION: Walk the namespace starting at "start_node" and perform any
+ * DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
* necessary initialization on the objects found therein
*
******************************************************************************/
@@ -196,7 +196,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
acpi_status status;
struct acpi_init_walk_info info;
- ACPI_FUNCTION_TRACE("ds_initialize_objects");
+ ACPI_FUNCTION_TRACE(ds_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@@ -213,7 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
+ ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index c475546535b..bc9aca4e740 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -81,6 +81,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
/* Invoke the global exception handler */
if (acpi_gbl_exception_handler) {
+
/* Exit the interpreter, allow handler to execute methods */
acpi_ex_exit_interpreter();
@@ -100,6 +101,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
}
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
+
/* Display method locals/args if disassembler is present */
acpi_dm_dump_method_info(status, walk_state, walk_state->op);
@@ -132,7 +134,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ds_begin_method_execution", method_node);
+ ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
if (!method_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
@@ -168,11 +170,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
/*
* Get a unit from the method semaphore. This releases the
- * interpreter if we block
+ * interpreter if we block (then reacquires it)
*/
status =
acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
/*
@@ -183,7 +188,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
if (!obj_desc->method.owner_id) {
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
}
@@ -193,6 +198,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
*/
obj_desc->method.thread_count++;
return_ACPI_STATUS(status);
+
+ cleanup:
+ /* On error, must signal the method semaphore if present */
+
+ if (obj_desc->method.semaphore) {
+ (void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
+ }
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -218,10 +231,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_namespace_node *method_node;
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_call_control_method", this_walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Execute method %p, currentstate=%p\n",
@@ -240,25 +253,31 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
- /* Init for new method, wait on concurrency semaphore */
+ /* Init for new method, possibly wait on concurrency semaphore */
status = acpi_ds_begin_method_execution(method_node, obj_desc,
this_walk_state->method_node);
if (ACPI_FAILURE(status)) {
- goto cleanup;
+ return_ACPI_STATUS(status);
}
+ /*
+ * 1) Parse the method. All "normal" methods are parsed for each execution.
+ * Internal methods (_OSI, etc.) do not require parsing.
+ */
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
- /* 1) Parse: Create a new walk state for the preempting walk */
+
+ /* Create a new walk state for the parse */
next_walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
obj_desc, NULL);
if (!next_walk_state) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /* Create and init a Root Node */
+ /* Create and init a parse tree root */
op = acpi_ps_create_scope_op();
if (!op) {
@@ -271,17 +290,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
obj_desc->method.aml_length,
NULL, 1);
if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(next_walk_state);
+ acpi_ps_delete_parse_tree(op);
goto cleanup;
}
- /* Begin AML parse */
+ /* Begin AML parse (deletes next_walk_state) */
status = acpi_ps_parse_aml(next_walk_state);
acpi_ps_delete_parse_tree(op);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
}
- /* 2) Execute: Create a new state for the preempting walk */
+ /* 2) Begin method execution. Create a new walk state */
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
NULL, obj_desc, thread);
@@ -289,6 +311,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
status = AE_NO_MEMORY;
goto cleanup;
}
+
/*
* The resolved arguments were put on the previous walk state's operand
* stack. Operands on the previous walk state stack always
@@ -296,12 +319,24 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
this_walk_state->operands[this_walk_state->num_operands] = NULL;
- info.parameters = &this_walk_state->operands[0];
- info.parameter_type = ACPI_PARAM_ARGS;
+ /*
+ * Allocate and initialize the evaluation information block
+ * TBD: this is somewhat inefficient, should change interface to
+ * ds_init_aml_walk. For now, keeps this struct off the CPU stack
+ */
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->parameters = &this_walk_state->operands[0];
+ info->parameter_type = ACPI_PARAM_ARGS;
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
obj_desc->method.aml_start,
- obj_desc->method.aml_length, &info, 3);
+ obj_desc->method.aml_length, info, 3);
+
+ ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -323,6 +358,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
"Starting nested execution, newstate=%p\n",
next_walk_state));
+ /* Invoke an internal method if necessary */
+
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
status = obj_desc->method.implementation(next_walk_state);
}
@@ -330,16 +367,14 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
cleanup:
- /* Decrement the thread count on the method parse tree */
- if (next_walk_state && (next_walk_state->method_desc)) {
- next_walk_state->method_desc->method.thread_count--;
- }
+ /* On error, we must terminate the method properly */
- /* On error, we must delete the new walk state */
+ acpi_ds_terminate_control_method(obj_desc, next_walk_state);
+ if (next_walk_state) {
+ acpi_ds_delete_walk_state(next_walk_state);
+ }
- acpi_ds_terminate_control_method(next_walk_state);
- acpi_ds_delete_walk_state(next_walk_state);
return_ACPI_STATUS(status);
}
@@ -362,25 +397,33 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc)
{
acpi_status status;
+ int same_as_implicit_return;
- ACPI_FUNCTION_TRACE_PTR("ds_restart_control_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "****Restart [%4.4s] Op %p return_value_from_callee %p\n",
+ "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
(char *)&walk_state->method_node->name,
walk_state->method_call_op, return_desc));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- " return_from_this_method_used?=%X res_stack %p Walk %p\n",
+ " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
walk_state->return_used,
walk_state->results, walk_state));
/* Did the called method return a value? */
if (return_desc) {
+
+ /* Is the implicit return object the same as the return desc? */
+
+ same_as_implicit_return =
+ (walk_state->implicit_return_obj == return_desc);
+
/* Are we actually going to use the return value? */
if (walk_state->return_used) {
+
/* Save the return value from the previous method */
status = acpi_ds_result_push(return_desc, walk_state);
@@ -397,18 +440,23 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
}
/*
- * The following code is the
- * optional support for a so-called "implicit return". Some AML code
- * assumes that the last value of the method is "implicitly" returned
- * to the caller. Just save the last result as the return value.
+ * The following code is the optional support for the so-called
+ * "implicit return". Some AML code assumes that the last value of the
+ * method is "implicitly" returned to the caller, in the absence of an
+ * explicit return value.
+ *
+ * Just save the last result of the method as the return value.
+ *
* NOTE: this is optional because the ASL language does not actually
* support this behavior.
*/
else if (!acpi_ds_do_implicit_return
- (return_desc, walk_state, FALSE)) {
+ (return_desc, walk_state, FALSE)
+ || same_as_implicit_return) {
/*
* Delete the return value if it will not be used by the
- * calling method
+ * calling method or remove one reference if the explicit return
+ * is the same as the implicit return value.
*/
acpi_ut_remove_reference(return_desc);
}
@@ -421,7 +469,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* FUNCTION: acpi_ds_terminate_control_method
*
- * PARAMETERS: walk_state - State of the method
+ * PARAMETERS: method_desc - Method object
+ * walk_state - State associated with the method
*
* RETURN: None
*
@@ -431,95 +480,100 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
******************************************************************************/
-void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state)
{
- union acpi_operand_object *obj_desc;
struct acpi_namespace_node *method_node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_terminate_control_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
- if (!walk_state) {
- return_VOID;
- }
+ /* method_desc is required, walk_state is optional */
- /* The current method object was saved in the walk state */
-
- obj_desc = walk_state->method_desc;
- if (!obj_desc) {
+ if (!method_desc) {
return_VOID;
}
- /* Delete all arguments and locals */
+ if (walk_state) {
- acpi_ds_method_data_delete_all(walk_state);
+ /* Delete all arguments and locals */
+
+ acpi_ds_method_data_delete_all(walk_state);
+ }
/*
* Lock the parser while we terminate this method.
* If this is the last thread executing the method,
* we have additional cleanup to perform
*/
- status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
if (ACPI_FAILURE(status)) {
return_VOID;
}
/* Signal completion of the execution of this method if necessary */
- if (walk_state->method_desc->method.semaphore) {
+ if (method_desc->method.semaphore) {
status =
- acpi_os_signal_semaphore(walk_state->method_desc->method.
- semaphore, 1);
+ acpi_os_signal_semaphore(method_desc->method.semaphore, 1);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not signal method semaphore"));
- /* Ignore error and continue cleanup */
+ /* Ignore error and continue */
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not signal method semaphore"));
}
}
- /*
- * There are no more threads executing this method. Perform
- * additional cleanup.
- *
- * The method Node is stored in the walk state
- */
- method_node = walk_state->method_node;
+ if (walk_state) {
+ /*
+ * Delete any objects created by this method during execution.
+ * The method Node is stored in the walk state
+ */
+ method_node = walk_state->method_node;
- /* Lock namespace for possible update */
+ /* Lock namespace for possible update */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto exit;
- }
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
- /*
- * Delete any namespace entries created immediately underneath
- * the method
- */
- if (method_node->child) {
- acpi_ns_delete_namespace_subtree(method_node);
+ /*
+ * Delete any namespace entries created immediately underneath
+ * the method
+ */
+ if (method_node && method_node->child) {
+ acpi_ns_delete_namespace_subtree(method_node);
+ }
+
+ /*
+ * Delete any namespace entries created anywhere else within
+ * the namespace by the execution of this method
+ */
+ acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
- /*
- * Delete any namespace entries created anywhere else within
- * the namespace by the execution of this method
- */
- acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
- owner_id);
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ /* Decrement the thread count on the method */
+
+ if (method_desc->method.thread_count) {
+ method_desc->method.thread_count--;
+ } else {
+ ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
+ }
/* Are there any other threads currently executing this method? */
- if (walk_state->method_desc->method.thread_count) {
+ if (method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Completed execution of one thread, %d threads remaining\n",
- walk_state->method_desc->method.
- thread_count));
+ method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */
@@ -533,22 +587,20 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
* This code is here because we must wait until the last thread exits
* before creating the synchronization semaphore.
*/
- if ((walk_state->method_desc->method.concurrency == 1) &&
- (!walk_state->method_desc->method.semaphore)) {
+ if ((method_desc->method.concurrency == 1) &&
+ (!method_desc->method.semaphore)) {
status = acpi_os_create_semaphore(1, 1,
- &walk_state->
- method_desc->method.
+ &method_desc->method.
semaphore);
}
/* No more threads, we can free the owner_id */
- acpi_ut_release_owner_id(&walk_state->method_desc->method.
- owner_id);
+ acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
+ (void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
return_VOID;
}
@@ -581,7 +633,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
+ ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node);
/* Parameter Validation */
@@ -590,7 +642,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** Parsing [%4.4s] **** named_obj=%p\n",
+ "**** Parsing [%4.4s] **** NamedObj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
@@ -669,7 +721,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
+ "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*
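acpi_ds_begin_method_execution() now releases the method semaphore unit it acquired if the later owner-ID allocation fails, via the usual goto-cleanup idiom. A generic sketch of that idiom; the semaphore and owner-ID helpers here are placeholders, not the ACPICA interfaces:

#include <stdio.h>

static int units;			/* pretend this is the method semaphore */

static int take_semaphore(void)    { units--; return 0; }
static void signal_semaphore(void) { units++; }
static int allocate_owner_id(int fail) { return fail ? -1 : 0; }

static int begin_method(int fail_owner_id)
{
	int status = take_semaphore();	/* first resource acquired */
	if (status)
		return status;

	status = allocate_owner_id(fail_owner_id);
	if (status)
		goto cleanup;		/* was: return status; which leaked a semaphore unit */

	return 0;

cleanup:
	signal_semaphore();		/* give the unit back on the error path */
	return status;
}

int main(void)
{
	units = 1;
	begin_method(1);
	printf("semaphore units after failed start: %d\n", units); /* 1: nothing leaked */
	return 0;
}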
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index c025674f938..459160ff905 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -81,7 +81,7 @@ acpi_ds_method_data_get_type(u16 opcode,
* special data types.
*
* NOTES: walk_state fields are initialized to zero by the
- * ACPI_MEM_CALLOCATE().
+ * ACPI_ALLOCATE_ZEROED().
*
* A pseudo-Namespace Node is assigned to each argument and local
* so that ref_of() can return a pointer to the Node.
@@ -92,7 +92,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
{
u32 i;
- ACPI_FUNCTION_TRACE("ds_method_data_init");
+ ACPI_FUNCTION_TRACE(ds_method_data_init);
/* Init the method arguments */
@@ -100,10 +100,10 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
NAMEOF_ARG_NTE);
walk_state->arguments[i].name.integer |= (i << 24);
- walk_state->arguments[i].descriptor = ACPI_DESC_TYPE_NAMED;
+ walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].type = ACPI_TYPE_ANY;
- walk_state->arguments[i].flags = ANOBJ_END_OF_PEER_LIST |
- ANOBJ_METHOD_ARG;
+ walk_state->arguments[i].flags =
+ ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
}
/* Init the method locals */
@@ -113,11 +113,11 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
NAMEOF_LOCAL_NTE);
walk_state->local_variables[i].name.integer |= (i << 24);
- walk_state->local_variables[i].descriptor =
+ walk_state->local_variables[i].descriptor_type =
ACPI_DESC_TYPE_NAMED;
walk_state->local_variables[i].type = ACPI_TYPE_ANY;
- walk_state->local_variables[i].flags = ANOBJ_END_OF_PEER_LIST |
- ANOBJ_METHOD_LOCAL;
+ walk_state->local_variables[i].flags =
+ ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
}
return_VOID;
@@ -140,7 +140,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
{
u32 index;
- ACPI_FUNCTION_TRACE("ds_method_data_delete_all");
+ ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
/* Detach the locals */
@@ -199,7 +199,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
acpi_status status;
u32 index = 0;
- ACPI_FUNCTION_TRACE_PTR("ds_method_data_init_args", params);
+ ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
if (!params) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -251,7 +251,7 @@ acpi_ds_method_data_get_node(u16 opcode,
struct acpi_walk_state *walk_state,
struct acpi_namespace_node **node)
{
- ACPI_FUNCTION_TRACE("ds_method_data_get_node");
+ ACPI_FUNCTION_TRACE(ds_method_data_get_node);
/*
* Method Locals and Arguments are supported
@@ -318,10 +318,10 @@ acpi_ds_method_data_set_value(u16 opcode,
acpi_status status;
struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("ds_method_data_set_value");
+ ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "new_obj %p Opcode %X, Refs=%d [%s]\n", object,
+ "NewObj %p Opcode %X, Refs=%d [%s]\n", object,
opcode, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
@@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* Increment ref count so object can't be deleted while installed.
* NOTE: We do not copy the object in order to preserve the call by
* reference semantics of ACPI Control Method invocation.
- * (See ACPI specification 2.0_c)
+ * (See ACPI Specification 2.0_c)
*/
acpi_ut_add_reference(object);
@@ -351,7 +351,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_get_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
- * Index - which local_var or argument to get
+ * Index - Which local_var or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
*
@@ -372,7 +372,7 @@ acpi_ds_method_data_get_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ds_method_data_get_value");
+ ACPI_FUNCTION_TRACE(ds_method_data_get_value);
/* Validate the object descriptor */
@@ -459,7 +459,7 @@ acpi_ds_method_data_get_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_delete_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
- * Index - which local_var or argument to delete
+ * Index - Which local_var or argument to delete
* walk_state - Current walk state object
*
* RETURN: None
@@ -477,7 +477,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ds_method_data_delete_value");
+ ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
/* Get the namespace node for the arg/local */
@@ -538,7 +538,7 @@ acpi_ds_store_object_to_local(u16 opcode,
union acpi_operand_object *current_obj_desc;
union acpi_operand_object *new_obj_desc;
- ACPI_FUNCTION_TRACE("ds_store_object_to_local");
+ ACPI_FUNCTION_TRACE(ds_store_object_to_local);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Opcode=%X Index=%d Obj=%p\n",
opcode, index, obj_desc));
@@ -614,7 +614,7 @@ acpi_ds_store_object_to_local(u16 opcode,
&& (current_obj_desc->reference.opcode ==
AML_REF_OF_OP)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Arg (%p) is an obj_ref(Node), storing in node %p\n",
+ "Arg (%p) is an ObjRef(Node), storing in node %p\n",
new_obj_desc,
current_obj_desc));
@@ -688,7 +688,7 @@ acpi_ds_method_data_get_type(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ds_method_data_get_type");
+ ACPI_FUNCTION_TRACE(ds_method_data_get_type);
/* Get the namespace node for the arg/local */
@@ -701,6 +701,7 @@ acpi_ds_method_data_get_type(u16 opcode,
object = acpi_ns_get_attached_object(node);
if (!object) {
+
/* Uninitialized local/arg, return TYPE_ANY */
return_VALUE(ACPI_TYPE_ANY);
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 8b21f0f9e51..72190abb1d5 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -81,7 +81,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ds_build_internal_object");
+ ACPI_FUNCTION_TRACE(ds_build_internal_object);
*obj_desc_ptr = NULL;
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
@@ -103,6 +103,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
common.
node)));
if (ACPI_FAILURE(status)) {
+
/* Check if we are resolving a named reference within a package */
if ((status == AE_NOT_FOUND)
@@ -186,7 +187,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *byte_list;
u32 byte_list_length = 0;
- ACPI_FUNCTION_TRACE("ds_build_internal_buffer_obj");
+ ACPI_FUNCTION_TRACE(ds_build_internal_buffer_obj);
/*
* If we are evaluating a Named buffer object "Name (xxxx, Buffer)".
@@ -195,6 +196,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
*/
obj_desc = *obj_desc_ptr;
if (!obj_desc) {
+
/* Create a new buffer object */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
@@ -243,7 +245,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
"Buffer defined with zero length in AML, creating\n"));
} else {
obj_desc->buffer.pointer =
- ACPI_MEM_CALLOCATE(obj_desc->buffer.length);
+ ACPI_ALLOCATE_ZEROED(obj_desc->buffer.length);
if (!obj_desc->buffer.pointer) {
acpi_ut_delete_object_desc(obj_desc);
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -291,7 +293,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ds_build_internal_package_obj");
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
/* Find the parent of a possibly nested package */
@@ -339,9 +341,10 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* individual objects). Add an extra pointer slot so
* that the list is always null terminated.
*/
- obj_desc->package.elements = ACPI_MEM_CALLOCATE(((acpi_size) obj_desc->
- package.count +
- 1) * sizeof(void *));
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ obj_desc->package.
+ count +
+ 1) * sizeof(void *));
if (!obj_desc->package.elements) {
acpi_ut_delete_object_desc(obj_desc);
@@ -355,6 +358,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
arg = arg->common.next;
for (i = 0; arg; i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+
/* Object (package or buffer) is already built */
obj_desc->package.elements[i] =
@@ -396,7 +400,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
acpi_status status;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE_PTR("ds_create_node", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_node, op);
/*
* Because of the execution pass through the non-control-method
@@ -408,6 +412,7 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
}
if (!op->common.value.arg) {
+
/* No arguments, there is nothing to do */
return_ACPI_STATUS(AE_OK);
@@ -464,11 +469,12 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ds_init_object_from_op");
+ ACPI_FUNCTION_TRACE(ds_init_object_from_op);
obj_desc = *ret_obj_desc;
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
+
/* Unknown opcode */
return_ACPI_STATUS(AE_TYPE);
@@ -626,6 +632,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
default: /* Other literals, etc.. */
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6229c10674e..5b974a8fe61 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -91,7 +91,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ds_execute_arguments");
+ ACPI_FUNCTION_TRACE(ds_execute_arguments);
/*
* Allocate a new parser op to be the root of the parsed tree
@@ -193,7 +193,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_get_buffer_field_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -206,7 +206,7 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_BUFFER_FIELD, node, NULL));
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] buffer_field Arg Init\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
@@ -235,7 +235,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_get_buffer_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -279,7 +279,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_get_package_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -324,7 +324,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
acpi_status status;
union acpi_operand_object *extra_desc;
- ACPI_FUNCTION_TRACE_PTR("ds_get_region_arguments", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
@@ -342,8 +342,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_REGION, node, NULL));
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[%4.4s] op_region Arg Init at AML %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
acpi_ut_get_node_name(node),
extra_desc->extra.aml_start));
@@ -352,6 +351,28 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Validate the region address/length via the host OS */
+
+ status = acpi_os_validate_address(obj_desc->region.space_id,
+ obj_desc->region.address,
+ (acpi_size) obj_desc->region.length);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Invalid address/length. We will emit an error message and mark
+ * the region as invalid, so that it will cause an additional error if
+ * it is ever used. Then return AE_OK.
+ */
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During address validation of OpRegion [%4.4s]",
+ node->name.ascii));
+ obj_desc->common.flags |= AOPOBJ_INVALID;
+ status = AE_OK;
+ }
+
return_ACPI_STATUS(status);
}
@@ -411,7 +432,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
u8 field_flags;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_init_buffer_field", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ds_init_buffer_field, obj_desc);
/* Host object must be a Buffer */
@@ -457,7 +478,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
if (bit_count == 0) {
ACPI_ERROR((AE_INFO,
- "Attempt to create_field of length zero"));
+ "Attempt to CreateField of length zero"));
status = AE_AML_OPERAND_VALUE;
goto cleanup;
}
@@ -595,7 +616,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
- ACPI_FUNCTION_TRACE_PTR("ds_eval_buffer_field_operands", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_buffer_field_operands, op);
/*
* This is where we evaluate the address and length fields of the
@@ -627,7 +648,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state->num_operands,
- "after acpi_ex_resolve_operands");
+ "after AcpiExResolveOperands");
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)",
@@ -640,6 +661,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
/* Initialize the Buffer Field */
if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
/* NOTE: Slightly different operands for this opcode */
status =
@@ -685,7 +707,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_parse_object *next_op;
- ACPI_FUNCTION_TRACE_PTR("ds_eval_region_operands", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op);
/*
* This is where we evaluate the address and length fields of the
@@ -718,7 +740,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
acpi_ps_get_opcode_name(op->common.aml_opcode),
- 1, "after acpi_ex_resolve_operands");
+ 1, "after AcpiExResolveOperands");
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
@@ -744,7 +766,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
operand_desc->integer.value;
acpi_ut_remove_reference(operand_desc);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "rgn_obj %p Addr %8.8X%8.8X Len %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
@@ -780,7 +802,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
union acpi_operand_object *arg_desc;
u32 length;
- ACPI_FUNCTION_TRACE("ds_eval_data_object_operands");
+ ACPI_FUNCTION_TRACE(ds_eval_data_object_operands);
/* The first operand (for all of these data objects) is the length */
@@ -874,7 +896,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
- ACPI_FUNCTION_NAME("ds_exec_begin_control_op");
+ ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
op->common.aml_opcode, walk_state));
@@ -952,7 +974,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
- ACPI_FUNCTION_NAME("ds_exec_end_control_op");
+ ACPI_FUNCTION_NAME(ds_exec_end_control_op);
switch (op->common.aml_opcode) {
case AML_IF_OP:
@@ -984,6 +1006,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
if (walk_state->control_state->common.value) {
+
/* Predicate was true, go back and evaluate it again! */
status = AE_CTRL_PENDING;
@@ -1014,6 +1037,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
* has been bubbled up the tree
*/
if (op->common.value.arg) {
+
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
@@ -1047,6 +1071,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
walk_state->return_desc = walk_state->operands[0];
} else if ((walk_state->results) &&
(walk_state->results->results.num_results > 0)) {
+
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
@@ -1095,7 +1120,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
}
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Completed RETURN_OP State=%p, ret_val=%p\n",
+ "Completed RETURN_OP State=%p, RetVal=%p\n",
walk_state, walk_state->return_desc));
/* End the control method execution right now */
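The added acpi_os_validate_address() step lets the host veto an operation region whose address or length looks bogus; instead of failing the table load, the region is merely flagged AOPOBJ_INVALID so that only a later access to it errors out. A reduced sketch of that "validate, mark invalid, keep going" policy, with a stand-in for the validation hook:

#include <stdio.h>

#define FLAG_INVALID 0x01

struct region { unsigned long addr; unsigned long len; unsigned flags; };

/* Stand-in for the host validation hook. */
static int host_validate(unsigned long addr, unsigned long len)
{
	return (len == 0 || addr + len < addr) ? -1 : 0;	/* reject empty or wrapping ranges */
}

static int init_region(struct region *r)
{
	if (host_validate(r->addr, r->len)) {
		fprintf(stderr, "bad region %#lx+%#lx, marking invalid\n", r->addr, r->len);
		r->flags |= FLAG_INVALID;	/* later reads/writes will fail, not the load */
	}
	return 0;				/* always report success to the caller */
}

int main(void)
{
	struct region bad = { 0xfee00000UL, 0, 0 };

	init_region(&bad);
	printf("flags: %#x\n", bad.flags);
	return 0;
}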
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 53356a591ac..05230baf5de 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -68,7 +68,7 @@ ACPI_MODULE_NAME("dsutils")
******************************************************************************/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
{
- ACPI_FUNCTION_NAME("ds_clear_implicit_return");
+ ACPI_FUNCTION_NAME(ds_clear_implicit_return);
/*
* Slack must be enabled for this feature
@@ -115,7 +115,7 @@ u8
acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
struct acpi_walk_state *walk_state, u8 add_reference)
{
- ACPI_FUNCTION_NAME("ds_do_implicit_return");
+ ACPI_FUNCTION_NAME(ds_do_implicit_return);
/*
* Slack must be enabled for this feature, and we must
@@ -171,7 +171,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
{
const struct acpi_opcode_info *parent_info;
- ACPI_FUNCTION_TRACE_PTR("ds_is_result_used", op);
+ ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op);
/* Must have both an Op and a Result Object */
@@ -202,6 +202,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*/
if ((!op->common.parent) ||
(op->common.parent->common.aml_opcode == AML_SCOPE_OP)) {
+
/* No parent, the return value cannot possibly be used */
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -340,7 +341,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ds_delete_result_if_not_used", result_obj);
+ ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj);
if (!op) {
ACPI_ERROR((AE_INFO, "Null Op"));
@@ -352,6 +353,7 @@ acpi_ds_delete_result_if_not_used(union acpi_parse_object *op,
}
if (!acpi_ds_is_result_used(op, walk_state)) {
+
/* Must pop the result stack (obj_desc should be equal to result_obj) */
status = acpi_ds_result_pop(&obj_desc, walk_state);
@@ -382,7 +384,7 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state)
u32 i;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ds_resolve_operands", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state);
/*
* Attempt to resolve each of the valid operands
@@ -417,7 +419,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
{
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_clear_operands", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state);
/* Remove a reference on each operand on the stack */
@@ -465,7 +467,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
acpi_interpreter_mode interpreter_mode;
const struct acpi_opcode_info *op_info;
- ACPI_FUNCTION_TRACE_PTR("ds_create_operand", arg);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg);
/* A valid name must be looked up in the namespace */
@@ -498,7 +500,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
*/
if ((walk_state->deferred_node) &&
(walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD)
- && (arg_index != 0)) {
+ && (arg_index ==
+ (u32) ((walk_state->opcode ==
+ AML_CREATE_FIELD_OP) ? 3 : 2))) {
obj_desc =
ACPI_CAST_PTR(union acpi_operand_object,
walk_state->deferred_node);
@@ -521,6 +525,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
&& (parent_op->common.aml_opcode != AML_REGION_OP)
&& (parent_op->common.aml_opcode !=
AML_INT_NAMEPATH_OP)) {
+
/* Enter name into namespace if not found */
interpreter_mode = ACPI_IMODE_LOAD_PASS2;
@@ -572,7 +577,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/* Free the namestring created above */
- ACPI_MEM_FREE(name_string);
+ ACPI_FREE(name_string);
/* Check status from the lookup */
@@ -696,7 +701,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arg;
u32 arg_count = 0;
- ACPI_FUNCTION_TRACE_PTR("ds_create_operands", first_arg);
+ ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
/* For all arguments in the list... */
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index f1af655ff11..3acbd9145d7 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -49,7 +49,6 @@
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdebug.h>
-#include <acpi/acdisasm.h>
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswexec")
@@ -93,7 +92,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc;
union acpi_operand_object *local_obj_desc = NULL;
- ACPI_FUNCTION_TRACE_PTR("ds_get_predicate_value", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_get_predicate_value, walk_state);
walk_state->control_state->common.state = 0;
@@ -123,7 +122,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (!obj_desc) {
ACPI_ERROR((AE_INFO,
- "No predicate obj_desc=%p State=%p",
+ "No predicate ObjDesc=%p State=%p",
obj_desc, walk_state));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
@@ -140,7 +139,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
if (ACPI_GET_OBJECT_TYPE(local_obj_desc) != ACPI_TYPE_INTEGER) {
ACPI_ERROR((AE_INFO,
- "Bad predicate (not an integer) obj_desc=%p State=%p Type=%X",
+ "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X",
obj_desc, walk_state,
ACPI_GET_OBJECT_TYPE(obj_desc)));
@@ -214,7 +213,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
acpi_status status = AE_OK;
u32 opcode_class;
- ACPI_FUNCTION_TRACE_PTR("ds_exec_begin_op", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_exec_begin_op, walk_state);
op = walk_state->op;
if (!op) {
@@ -296,7 +295,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
case AML_CLASS_NAMED_OBJECT:
- if (walk_state->walk_type == ACPI_WALK_METHOD) {
+ if (walk_state->walk_type & ACPI_WALK_METHOD) {
/*
* Found a named object declaration during method execution;
* we must enter this object into the namespace. The created
@@ -354,7 +353,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
union acpi_parse_object *next_op;
union acpi_parse_object *first_arg;
- ACPI_FUNCTION_TRACE_PTR("ds_exec_end_op", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state);
op = walk_state->op;
op_type = walk_state->op_info->type;
@@ -409,6 +408,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
* being the object_type and size_of operators.
*/
if (!(walk_state->op_info->flags & AML_NO_OPERAND_RESOLVE)) {
+
/* Resolve all operands */
status = acpi_ex_resolve_operands(walk_state->opcode,
@@ -423,7 +423,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
acpi_ps_get_opcode_name
(walk_state->opcode),
walk_state->num_operands,
- "after ex_resolve_operands");
+ "after ExResolveOperands");
}
}
@@ -437,7 +437,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
acpi_gbl_op_type_dispatch[op_type] (walk_state);
} else {
/*
- * Treat constructs of the form "Store(local_x,local_x)" as noops when the
+ * Treat constructs of the form "Store(LocalX,LocalX)" as noops when the
* Local is uninitialized.
*/
if ((status == AE_AML_UNINITIALIZED_LOCAL) &&
@@ -548,6 +548,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
*/
status = acpi_ds_resolve_operands(walk_state);
if (ACPI_FAILURE(status)) {
+
/* On error, clear all resolved operands */
acpi_ds_clear_operands(walk_state);
@@ -569,7 +570,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_CREATE_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Executing create_field Buffer/Index Op=%p\n",
+ "Executing CreateField Buffer/Index Op=%p\n",
op));
status = acpi_ds_load2_end_op(walk_state);
@@ -584,7 +585,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_CREATE_OBJECT:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Executing create_object (Buffer/Package) Op=%p\n",
+ "Executing CreateObject (Buffer/Package) Op=%p\n",
op));
switch (op->common.parent->common.aml_opcode) {
@@ -657,7 +658,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
if (op->common.aml_opcode == AML_REGION_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Executing op_region Address/Length Op=%p\n",
+ "Executing OpRegion Address/Length Op=%p\n",
op));
status =
@@ -722,6 +723,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
cleanup:
if (walk_state->result_obj) {
+
/* Break to debugger to display result */
ACPI_DEBUGGER_EXEC(acpi_db_display_result_object
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index d3d24da31c8..35074399c61 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
char *path;
u32 flags;
- ACPI_FUNCTION_TRACE("ds_load1_begin_op");
+ ACPI_FUNCTION_TRACE(ds_load1_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -178,12 +178,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* Target of Scope() not found. Generate an External for it, and
* insert the name into the namespace.
*/
- acpi_dm_add_to_external_list(path);
+ acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0);
status =
acpi_ns_lookup(walk_state->scope_info, path,
object_type, ACPI_IMODE_LOAD_PASS1,
ACPI_NS_SEARCH_PARENT, walk_state,
- &(node));
+ &node);
}
#endif
if (ACPI_FAILURE(status)) {
@@ -261,6 +261,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
*/
if (walk_state->deferred_node) {
+
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
@@ -300,10 +301,41 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
status =
acpi_ns_lookup(walk_state->scope_info, path, object_type,
ACPI_IMODE_LOAD_PASS1, flags, walk_state,
- &(node));
+ &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
- return_ACPI_STATUS(status);
+ if (status == AE_ALREADY_EXISTS) {
+
+ /* The name already exists in this scope */
+
+ if (node->flags & ANOBJ_IS_EXTERNAL) {
+ /*
+ * Allow one create on an object or segment that was
+ * previously declared External
+ */
+ node->flags &= ~ANOBJ_IS_EXTERNAL;
+ node->type = (u8) object_type;
+
+ /* Just retyped a node, probably will need to open a scope */
+
+ if (acpi_ns_opens_scope(object_type)) {
+ status =
+ acpi_ds_scope_stack_push
+ (node, object_type,
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+ status = AE_OK;
+ }
+ }
+
+ if (ACPI_FAILURE(status)) {
+
+ ACPI_ERROR_NAMESPACE(path, status);
+ return_ACPI_STATUS(status);
+ }
}
break;
}
@@ -311,6 +343,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
/* Common exit */
if (!op) {
+
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
@@ -359,7 +392,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
acpi_object_type object_type;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ds_load1_end_op");
+ ACPI_FUNCTION_TRACE(ds_load1_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -413,6 +446,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
#endif
if (op->common.aml_opcode == AML_NAME_OP) {
+
/* For Name opcode, get the object type from the argument */
if (op->common.value.arg) {
@@ -445,7 +479,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "LOADING-Method: State=%p Op=%p named_obj=%p\n",
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
@@ -511,7 +545,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
acpi_object_type object_type;
char *buffer_ptr;
- ACPI_FUNCTION_TRACE("ds_load2_begin_op");
+ ACPI_FUNCTION_TRACE(ds_load2_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -521,6 +555,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if ((walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
+
/* We are executing a while loop outside of a method */
status = acpi_ds_exec_begin_op(walk_state, out_op);
@@ -554,10 +589,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* Get the name we are going to enter or lookup in the namespace */
if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+
/* For Namepath op, get the path string */
buffer_ptr = op->common.value.string;
if (!buffer_ptr) {
+
/* No name, just exit */
return_ACPI_STATUS(AE_OK);
@@ -680,6 +717,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
/* All other opcodes */
if (op && op->common.node) {
+
/* This op/node was previously entered into the namespace */
node = op->common.node;
@@ -705,6 +743,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
* Note: Name may already exist if we are executing a deferred opcode.
*/
if (walk_state->deferred_node) {
+
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
@@ -727,6 +766,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (!op) {
+
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
@@ -776,7 +816,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
u32 i;
#endif
- ACPI_FUNCTION_TRACE("ds_load2_end_op");
+ ACPI_FUNCTION_TRACE(ds_load2_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
@@ -870,7 +910,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "Create-Load [%s] State=%p Op=%p named_obj=%p\n",
+ "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state, op, node));
@@ -1045,7 +1085,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "LOADING-Method: State=%p Op=%p named_obj=%p\n",
+ "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
@@ -1090,7 +1130,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_CLASS_METHOD_CALL:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "RESOLVING-method_call: State=%p Op=%p named_obj=%p\n",
+ "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
walk_state, op, node));
/*
@@ -1104,7 +1144,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
ACPI_NS_DONT_OPEN_SCOPE, walk_state,
&(new_node));
if (ACPI_SUCCESS(status)) {
-
/*
* Make sure that what we found is indeed a method
* We didn't search for a method on purpose, to see if the name
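The dswload.c change above relaxes namespace loading for names that were first declared with External(): when acpi_ns_lookup() returns AE_ALREADY_EXISTS for a node carrying ANOBJ_IS_EXTERNAL, the node is simply retyped and, if the new type opens a scope, that scope is pushed. A condensed sketch of the new branch, using only identifiers visible in the hunk above:

    if (status == AE_ALREADY_EXISTS && (node->flags & ANOBJ_IS_EXTERNAL)) {
        /* One real definition is allowed for a previously External name:
         * drop the External flag and retype the node */
        node->flags &= ~ANOBJ_IS_EXTERNAL;
        node->type = (u8) object_type;

        if (acpi_ns_opens_scope(object_type)) {
            status = acpi_ds_scope_stack_push(node, object_type, walk_state);
            if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
            }
        }
        status = AE_OK;
    }
    if (ACPI_FAILURE(status)) {
        ACPI_ERROR_NAMESPACE(path, status);
        return_ACPI_STATUS(status);
    }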
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index ada21ef4a17..c9228972f5f 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -63,9 +63,10 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
{
union acpi_generic_state *scope_info;
- ACPI_FUNCTION_NAME("ds_scope_stack_clear");
+ ACPI_FUNCTION_NAME(ds_scope_stack_clear);
while (walk_state->scope_info) {
+
/* Pop a scope off the stack */
scope_info = walk_state->scope_info;
@@ -102,9 +103,10 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
union acpi_generic_state *scope_info;
union acpi_generic_state *old_scope_info;
- ACPI_FUNCTION_TRACE("ds_scope_stack_push");
+ ACPI_FUNCTION_TRACE(ds_scope_stack_push);
if (!node) {
+
/* Invalid scope */
ACPI_ERROR((AE_INFO, "Null scope parameter"));
@@ -126,7 +128,7 @@ acpi_ds_scope_stack_push(struct acpi_namespace_node *node,
/* Init new scope object */
- scope_info->common.data_type = ACPI_DESC_TYPE_STATE_WSCOPE;
+ scope_info->common.descriptor_type = ACPI_DESC_TYPE_STATE_WSCOPE;
scope_info->scope.node = node;
scope_info->common.value = (u16) type;
@@ -176,7 +178,7 @@ acpi_status acpi_ds_scope_stack_pop(struct acpi_walk_state *walk_state)
union acpi_generic_state *scope_info;
union acpi_generic_state *new_scope_info;
- ACPI_FUNCTION_TRACE("ds_scope_stack_pop");
+ ACPI_FUNCTION_TRACE(ds_scope_stack_pop);
/*
* Pop scope info object off the stack.
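Throughout these files the debug macros now take a bare token rather than a quoted string (ACPI_FUNCTION_TRACE("ds_scope_stack_push") becomes ACPI_FUNCTION_TRACE(ds_scope_stack_push)); the macro stringifies the name itself, so the function name is written only once. A minimal illustration of the stringification idea, using a hypothetical stand-in macro (the real ACPICA macros also hook the name into the trace machinery):

    /* Hypothetical stand-in; not the ACPICA definition. */
    #define FUNCTION_NAME_SKETCH(name) \
        static const char *_function_name = #name

    void scope_stack_pop_example(void)
    {
        FUNCTION_NAME_SKETCH(ds_scope_stack_pop);

        /* _function_name now points to "ds_scope_stack_pop" */
        (void)_function_name;
    }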
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index fa78cb74ee3..7817e552267 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -66,7 +66,6 @@ void *acpi_ds_obj_stack_get_value(u32 index,
#endif
#ifdef ACPI_FUTURE_USAGE
-
/*******************************************************************************
*
* FUNCTION: acpi_ds_result_remove
@@ -88,7 +87,7 @@ acpi_ds_result_remove(union acpi_operand_object **object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_remove");
+ ACPI_FUNCTION_NAME(ds_result_remove);
state = walk_state->results;
if (!state) {
@@ -128,7 +127,6 @@ acpi_ds_result_remove(union acpi_operand_object **object,
return (AE_OK);
}
-
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -152,7 +150,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
acpi_native_uint index;
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_pop");
+ ACPI_FUNCTION_NAME(ds_result_pop);
state = walk_state->results;
if (!state) {
@@ -170,6 +168,7 @@ acpi_ds_result_pop(union acpi_operand_object ** object,
state->results.num_results--;
for (index = ACPI_OBJ_NUM_OPERANDS; index; index--) {
+
/* Check for a valid result object */
if (state->results.obj_desc[index - 1]) {
@@ -213,7 +212,7 @@ acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
acpi_native_uint index;
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_pop_from_bottom");
+ ACPI_FUNCTION_NAME(ds_result_pop_from_bottom);
state = walk_state->results;
if (!state) {
@@ -278,7 +277,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_push");
+ ACPI_FUNCTION_NAME(ds_result_push);
state = walk_state->results;
if (!state) {
@@ -331,14 +330,14 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_stack_push");
+ ACPI_FUNCTION_NAME(ds_result_stack_push);
state = acpi_ut_create_generic_state();
if (!state) {
return (AE_NO_MEMORY);
}
- state->common.data_type = ACPI_DESC_TYPE_STATE_RESULT;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
acpi_ut_push_generic_state(&walk_state->results, state);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
@@ -363,7 +362,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_stack_pop");
+ ACPI_FUNCTION_NAME(ds_result_stack_pop);
/* Check for stack underflow */
@@ -376,7 +375,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
state = acpi_ut_pop_generic_state(&walk_state->results);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Result=%p remaining_results=%X State=%p\n",
+ "Result=%p RemainingResults=%X State=%p\n",
state, state->results.num_results, walk_state));
acpi_ut_delete_generic_state(state);
@@ -400,7 +399,7 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
acpi_status
acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
{
- ACPI_FUNCTION_NAME("ds_obj_stack_push");
+ ACPI_FUNCTION_NAME(ds_obj_stack_push);
/* Check for stack overflow */
@@ -445,9 +444,10 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
{
u32 i;
- ACPI_FUNCTION_NAME("ds_obj_stack_pop");
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop);
for (i = 0; i < pop_count; i++) {
+
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
@@ -491,9 +491,10 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
u32 i;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_NAME("ds_obj_stack_pop_and_delete");
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
for (i = 0; i < pop_count; i++) {
+
/* Check for stack underflow */
if (walk_state->num_operands == 0) {
@@ -538,13 +539,13 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
*thread)
{
- ACPI_FUNCTION_NAME("ds_get_current_walk_state");
+ ACPI_FUNCTION_NAME(ds_get_current_walk_state);
if (!thread) {
return (NULL);
}
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current walk_state %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Current WalkState %p\n",
thread->walk_state_list));
return (thread->walk_state_list);
@@ -567,7 +568,7 @@ void
acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
struct acpi_thread_state *thread)
{
- ACPI_FUNCTION_TRACE("ds_push_walk_state");
+ ACPI_FUNCTION_TRACE(ds_push_walk_state);
walk_state->next = thread->walk_state_list;
thread->walk_state_list = walk_state;
@@ -593,11 +594,12 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
{
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ds_pop_walk_state");
+ ACPI_FUNCTION_TRACE(ds_pop_walk_state);
walk_state = thread->walk_state_list;
if (walk_state) {
+
/* Next walk state becomes the current walk state */
thread->walk_state_list = walk_state->next;
@@ -618,7 +620,7 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
* PARAMETERS: owner_id - ID for object creation
* Origin - Starting point for this walk
- * mth_desc - Method object
+ * method_desc - Method object
* Thread - Current thread state
*
* RETURN: Pointer to the new walk state.
@@ -632,24 +634,24 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
union acpi_parse_object
*origin,
union acpi_operand_object
- *mth_desc,
+ *method_desc,
struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
acpi_status status;
- ACPI_FUNCTION_TRACE("ds_create_walk_state");
+ ACPI_FUNCTION_TRACE(ds_create_walk_state);
- walk_state = ACPI_MEM_CALLOCATE(sizeof(struct acpi_walk_state));
+ walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
if (!walk_state) {
return_PTR(NULL);
}
- walk_state->data_type = ACPI_DESC_TYPE_WALK;
+ walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;
+ walk_state->method_desc = method_desc;
walk_state->owner_id = owner_id;
walk_state->origin = origin;
- walk_state->method_desc = mth_desc;
walk_state->thread = thread;
walk_state->parser_state.start_op = origin;
@@ -664,7 +666,7 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
status = acpi_ds_result_stack_push(walk_state);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(walk_state);
+ ACPI_FREE(walk_state);
return_PTR(NULL);
}
@@ -701,13 +703,13 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *method_node,
u8 * aml_start,
u32 aml_length,
- struct acpi_parameter_info *info, u8 pass_number)
+ struct acpi_evaluate_info *info, u8 pass_number)
{
acpi_status status;
struct acpi_parse_state *parser_state = &walk_state->parser_state;
union acpi_parse_object *extra_op;
- ACPI_FUNCTION_TRACE("ds_init_aml_walk");
+ ACPI_FUNCTION_TRACE(ds_init_aml_walk);
walk_state->parser_state.aml =
walk_state->parser_state.aml_start = aml_start;
@@ -778,6 +780,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
}
if (parser_state->start_node) {
+
/* Push start scope on scope stack and make it current */
status =
@@ -810,21 +813,24 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR("ds_delete_walk_state", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_delete_walk_state, walk_state);
if (!walk_state) {
return;
}
- if (walk_state->data_type != ACPI_DESC_TYPE_WALK) {
+ if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
ACPI_ERROR((AE_INFO, "%p is not a valid walk state",
walk_state));
return;
}
+ /* There should not be any open scopes */
+
if (walk_state->parser_state.scope) {
ACPI_ERROR((AE_INFO, "%p walk still has a scope list",
walk_state));
+ acpi_ps_cleanup_scope(&walk_state->parser_state);
}
/* Always must free any linked control states */
@@ -854,7 +860,7 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
acpi_ut_delete_generic_state(state);
}
- ACPI_MEM_FREE(walk_state);
+ ACPI_FREE(walk_state);
return_VOID;
}
@@ -879,7 +885,7 @@ acpi_ds_result_insert(void *object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_NAME("ds_result_insert");
+ ACPI_FUNCTION_NAME(ds_result_insert);
state = walk_state->results;
if (!state) {
@@ -937,7 +943,7 @@ acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state * walk_state)
{
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ds_obj_stack_delete_all", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_delete_all, walk_state);
/* The stack size is configurable, but fixed */
@@ -969,7 +975,7 @@ acpi_status
acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
struct acpi_walk_state *walk_state)
{
- ACPI_FUNCTION_NAME("ds_obj_stack_pop_object");
+ ACPI_FUNCTION_NAME(ds_obj_stack_pop_object);
/* Check for stack underflow */
@@ -1025,7 +1031,7 @@ acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
void *acpi_ds_obj_stack_get_value(u32 index, struct acpi_walk_state *walk_state)
{
- ACPI_FUNCTION_TRACE_PTR("ds_obj_stack_get_value", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_get_value, walk_state);
/* Can't do it if the stack is empty */
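Two renames recur through the dispatcher hunks: the allocation macros (ACPI_MEM_CALLOCATE and ACPI_MEM_FREE become ACPI_ALLOCATE_ZEROED and ACPI_FREE) and the object tag field (data_type becomes descriptor_type). Trimmed from the acpi_ds_create_walk_state() and acpi_ds_delete_walk_state() hunks above, the resulting allocate, tag, check, free shape is roughly:

    /* In acpi_ds_create_walk_state(): */
    walk_state = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_walk_state));
    if (!walk_state) {
        return_PTR(NULL);
    }
    walk_state->descriptor_type = ACPI_DESC_TYPE_WALK;

    /* In acpi_ds_delete_walk_state(): the tag guards against freeing
     * something that is not actually a walk state */
    if (walk_state->descriptor_type != ACPI_DESC_TYPE_WALK) {
        ACPI_ERROR((AE_INFO, "%p is not a valid walk state", walk_state));
        return;
    }
    ACPI_FREE(walk_state);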
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index eee0864ba30..18b3ea9dace 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -116,7 +116,7 @@ union acpi_ec {
struct acpi_generic_address command_addr;
struct acpi_generic_address data_addr;
unsigned long global_lock;
- spinlock_t lock;
+ struct semaphore sem;
} poll;
};
@@ -323,7 +323,6 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
{
acpi_status status = AE_OK;
int result = 0;
- unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_read");
@@ -339,8 +338,11 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
return_VALUE(-ENODEV);
}
- spin_lock_irqsave(&ec->poll.lock, flags);
-
+ if (down_interruptible(&ec->poll.sem)) {
+ result = -ERESTARTSYS;
+ goto end_nosem;
+ }
+
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ,
&ec->common.command_addr);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
@@ -358,8 +360,8 @@ static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
*data, address));
end:
- spin_unlock_irqrestore(&ec->poll.lock, flags);
-
+ up(&ec->poll.sem);
+end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@@ -370,7 +372,6 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
{
int result = 0;
acpi_status status = AE_OK;
- unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_write");
@@ -384,8 +385,11 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
return_VALUE(-ENODEV);
}
- spin_lock_irqsave(&ec->poll.lock, flags);
-
+ if (down_interruptible(&ec->poll.sem)) {
+ result = -ERESTARTSYS;
+ goto end_nosem;
+ }
+
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE,
&ec->common.command_addr);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
@@ -406,8 +410,8 @@ static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
data, address));
end:
- spin_unlock_irqrestore(&ec->poll.lock, flags);
-
+ up(&ec->poll.sem);
+end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@@ -568,7 +572,6 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
{
int result = 0;
acpi_status status = AE_OK;
- unsigned long flags = 0;
u32 glk = 0;
ACPI_FUNCTION_TRACE("acpi_ec_query");
@@ -589,8 +592,11 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
- spin_lock_irqsave(&ec->poll.lock, flags);
-
+ if (down_interruptible(&ec->poll.sem)) {
+ result = -ERESTARTSYS;
+ goto end_nosem;
+ }
+
acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY,
&ec->common.command_addr);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
@@ -602,8 +608,8 @@ static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
result = -ENODATA;
end:
- spin_unlock_irqrestore(&ec->poll.lock, flags);
-
+ up(&ec->poll.sem);
+end_nosem:
if (ec->common.global_lock)
acpi_release_global_lock(glk);
@@ -680,7 +686,6 @@ static void acpi_ec_gpe_poll_query(void *ec_cxt)
{
union acpi_ec *ec = (union acpi_ec *)ec_cxt;
u32 value = 0;
- unsigned long flags = 0;
static char object_name[5] = { '_', 'Q', '0', '0', '\0' };
const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
@@ -691,9 +696,11 @@ static void acpi_ec_gpe_poll_query(void *ec_cxt)
if (!ec_cxt)
goto end;
- spin_lock_irqsave(&ec->poll.lock, flags);
+ if (down_interruptible (&ec->poll.sem)) {
+ return_VOID;
+ }
acpi_hw_low_level_read(8, &value, &ec->common.command_addr);
- spin_unlock_irqrestore(&ec->poll.lock, flags);
+ up(&ec->poll.sem);
/* TBD: Implement asynch events!
* NOTE: All we care about are EC-SCI's. Other EC events are
@@ -763,8 +770,7 @@ static u32 acpi_ec_gpe_poll_handler(void *data)
acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_ec_gpe_query, ec);
+ status = acpi_os_execute(OSL_EC_POLL_HANDLER, acpi_ec_gpe_query, ec);
if (status == AE_OK)
return ACPI_INTERRUPT_HANDLED;
@@ -799,7 +805,7 @@ static u32 acpi_ec_gpe_intr_handler(void *data)
if (value & ACPI_EC_FLAG_SCI) {
atomic_add(1, &ec->intr.pending_gpe);
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
+ status = acpi_os_execute(OSL_EC_BURST_HANDLER,
acpi_ec_gpe_query, ec);
return status == AE_OK ?
ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
@@ -991,7 +997,6 @@ static int acpi_ec_poll_add(struct acpi_device *device)
int result = 0;
acpi_status status = AE_OK;
union acpi_ec *ec = NULL;
- unsigned long uid;
ACPI_FUNCTION_TRACE("acpi_ec_add");
@@ -1005,7 +1010,7 @@ static int acpi_ec_poll_add(struct acpi_device *device)
ec->common.handle = device->handle;
ec->common.uid = -1;
- spin_lock_init(&ec->poll.lock);
+ init_MUTEX(&ec->poll.sem);
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
acpi_driver_data(device) = ec;
@@ -1014,10 +1019,9 @@ static int acpi_ec_poll_add(struct acpi_device *device)
acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
&ec->common.global_lock);
- /* If our UID matches the UID for the ECDT-enumerated EC,
- we now have the *real* EC info, so kill the makeshift one. */
- acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid);
- if (ec_ecdt && ec_ecdt->common.uid == uid) {
+ /* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
+ http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
+ if (ec_ecdt) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
@@ -1062,7 +1066,6 @@ static int acpi_ec_intr_add(struct acpi_device *device)
int result = 0;
acpi_status status = AE_OK;
union acpi_ec *ec = NULL;
- unsigned long uid;
ACPI_FUNCTION_TRACE("acpi_ec_add");
@@ -1088,10 +1091,9 @@ static int acpi_ec_intr_add(struct acpi_device *device)
acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
&ec->common.global_lock);
- /* If our UID matches the UID for the ECDT-enumerated EC,
- we now have the *real* EC info, so kill the makeshift one. */
- acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid);
- if (ec_ecdt && ec_ecdt->common.uid == uid) {
+ /* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
+ http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
+ if (ec_ecdt) {
acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler);
@@ -1300,7 +1302,7 @@ acpi_fake_ecdt_poll_callback(acpi_handle handle,
&ec_ecdt->common.gpe_bit);
if (ACPI_FAILURE(status))
return status;
- spin_lock_init(&ec_ecdt->poll.lock);
+ init_MUTEX(&ec_ecdt->poll.sem);
ec_ecdt->common.global_lock = TRUE;
ec_ecdt->common.handle = handle;
@@ -1416,7 +1418,7 @@ static int __init acpi_ec_poll_get_real_ecdt(void)
ec_ecdt->common.status_addr = ecdt_ptr->ec_control;
ec_ecdt->common.data_addr = ecdt_ptr->ec_data;
ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit;
- spin_lock_init(&ec_ecdt->poll.lock);
+ init_MUTEX(&ec_ecdt->poll.sem);
/* use the GL just to be safe */
ec_ecdt->common.global_lock = TRUE;
ec_ecdt->common.uid = ecdt_ptr->uid;
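The ec.c hunks switch the polling-mode transaction lock from a spinlock to a semaphore (struct semaphore, init_MUTEX()), so the driver may sleep while the embedded controller completes a command, and a waiter that is interrupted backs out with -ERESTARTSYS. A minimal sketch of that acquire/release shape, assuming only the poll.sem field added above (this is not the driver function itself):

    /* Sketch: serialize one EC transaction on the polling path. */
    static int ec_transaction_sketch(union acpi_ec *ec)
    {
        int result = 0;

        if (down_interruptible(&ec->poll.sem)) {
            return -ERESTARTSYS;    /* interrupted before taking the lock */
        }

        /* ... write command/address, wait for IBE/OBF, read data ... */

        up(&ec->poll.sem);
        return result;
    }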
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index c9ac05c4685..919037d6acf 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -68,7 +68,7 @@ acpi_status acpi_ev_initialize_events(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_initialize_events");
+ ACPI_FUNCTION_TRACE(ev_initialize_events);
/* Make sure we have ACPI tables */
@@ -118,7 +118,7 @@ acpi_status acpi_ev_install_fadt_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_install_fadt_gpes");
+ ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
/* Namespace must be locked */
@@ -157,7 +157,7 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_install_xrupt_handlers");
+ ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
/* Install the SCI handler */
@@ -241,7 +241,7 @@ u32 acpi_ev_fixed_event_detect(void)
u32 fixed_enable;
acpi_native_uint i;
- ACPI_FUNCTION_NAME("ev_fixed_event_detect");
+ ACPI_FUNCTION_NAME(ev_fixed_event_detect);
/*
* Read the fixed feature status and enable registers, as all the cases
@@ -260,12 +260,14 @@ u32 acpi_ev_fixed_event_detect(void)
* Check for all possible Fixed Events and dispatch those that are active
*/
for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+
/* Both the status and enable bits must be on for this event */
if ((fixed_status & acpi_gbl_fixed_event_info[i].
status_bit_mask)
&& (fixed_enable & acpi_gbl_fixed_event_info[i].
enable_bit_mask)) {
+
/* Found an active (signalled) event */
int_status |= acpi_ev_fixed_event_dispatch((u32) i);
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index f64f977dd3d..f01d339407f 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -69,7 +69,7 @@ acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_set_gpe_type");
+ ACPI_FUNCTION_TRACE(ev_set_gpe_type);
/* Validate type and update register enable masks */
@@ -115,7 +115,7 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
struct acpi_gpe_register_info *gpe_register_info;
u8 register_bit;
- ACPI_FUNCTION_TRACE("ev_update_gpe_enable_masks");
+ ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
gpe_register_info = gpe_event_info->register_info;
if (!gpe_register_info) {
@@ -178,7 +178,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_enable_gpe");
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
/* Make sure HW enable masks are updated */
@@ -207,6 +207,7 @@ acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
if (write_to_hardware) {
+
/* Clear the GPE (of stale events), then enable it */
status = acpi_hw_clear_gpe(gpe_event_info);
@@ -243,7 +244,7 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_disable_gpe");
+ ACPI_FUNCTION_TRACE(ev_disable_gpe);
if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
return_ACPI_STATUS(AE_OK);
@@ -313,6 +314,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A NULL gpe_block means use the FADT-defined GPE block(s) */
if (!gpe_device) {
+
/* Examine GPE Block 0 and 1 (These blocks are permanent) */
for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
@@ -380,10 +382,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
u32 status_reg;
u32 enable_reg;
acpi_cpu_flags flags;
+ acpi_cpu_flags hw_flags;
acpi_native_uint i;
acpi_native_uint j;
- ACPI_FUNCTION_NAME("ev_gpe_detect");
+ ACPI_FUNCTION_NAME(ev_gpe_detect);
/* Check for the case where there are no GPEs */
@@ -391,9 +394,12 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
return (int_status);
}
- /* Examine all GPE blocks attached to this interrupt level */
+ /* We need to hold the GPE lock now, hardware lock in the loop */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Examine all GPE blocks attached to this interrupt level */
+
gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) {
/*
@@ -402,10 +408,13 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* Find all currently active GP events.
*/
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Get the next status/enable pair */
gpe_register_info = &gpe_block->register_info[i];
+ hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+
/* Read the Status Register */
status =
@@ -414,6 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&gpe_register_info->
status_address);
if (ACPI_FAILURE(status)) {
+ acpi_os_release_lock(acpi_gbl_hardware_lock,
+ hw_flags);
goto unlock_and_exit;
}
@@ -424,6 +435,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&enable_reg,
&gpe_register_info->
enable_address);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
+
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
@@ -437,6 +450,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
enabled_status_byte = (u8) (status_reg & enable_reg);
if (!enabled_status_byte) {
+
/* No active GPEs in this register, move on */
continue;
@@ -445,6 +459,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
/* Examine one GPE bit */
if (enabled_status_byte &
@@ -483,9 +498,9 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
*
* RETURN: None
*
- * DESCRIPTION: Perform the actual execution of a GPE control method. This
- * function is called from an invocation of acpi_os_queue_for_execution
- * (and therefore does NOT execute at interrupt level) so that
+ * DESCRIPTION: Perform the actual execution of a GPE control method. This
+ * function is called from an invocation of acpi_os_execute and
+ * therefore does NOT execute at interrupt level - so that
* the control method itself is not executed in the context of
* an interrupt handler.
*
@@ -494,12 +509,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = (void *)context;
- u32 gpe_number = 0;
acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
- ACPI_FUNCTION_TRACE("ev_asynch_execute_gpe_method");
+ ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -535,22 +549,35 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
*/
if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD) {
- /*
- * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
- * control method that corresponds to this GPE
- */
- info.node = local_gpe_event_info.dispatch.method_node;
- info.parameters =
- ACPI_CAST_PTR(union acpi_operand_object *, gpe_event_info);
- info.parameter_type = ACPI_PARAM_GPE;
- status = acpi_ns_evaluate_by_handle(&info);
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ status = AE_NO_MEMORY;
+ } else {
+ /*
+ * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
+ * control method that corresponds to this GPE
+ */
+ info->prefix_node =
+ local_gpe_event_info.dispatch.method_node;
+ info->parameters =
+ ACPI_CAST_PTR(union acpi_operand_object *,
+ gpe_event_info);
+ info->parameter_type = ACPI_PARAM_GPE;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ ACPI_FREE(info);
+ }
+
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating method [%4.4s] for GPE[%2X]",
+ "While evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
- method_node), gpe_number));
+ method_node)));
}
}
@@ -593,7 +620,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_gpe_dispatch");
+ ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
/*
* If edge-triggered, clear the GPE status bit now. Note that
@@ -669,9 +696,9 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
* Execute the method associated with the GPE
* NOTE: Level-triggered GPEs are cleared after the method completes.
*/
- status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_ev_asynch_execute_gpe_method,
- gpe_event_info);
+ status = acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_ev_asynch_execute_gpe_method,
+ gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
@@ -716,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
*
* DESCRIPTION: Determine if a GPE is "wake-only".
*
- * Called from Notify() code in interpreter when a "device_wake"
+ * Called from Notify() code in interpreter when a "DeviceWake"
* Notify comes in.
*
******************************************************************************/
@@ -726,7 +753,7 @@ acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_check_for_wake_only_gpe");
+ ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
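Two related changes run through evgpe.c: GPE control methods are now queued with acpi_os_execute() (replacing acpi_os_queue_for_execution()), and the method is evaluated through a heap-allocated struct acpi_evaluate_info handed to acpi_ns_evaluate() rather than a stack struct acpi_parameter_info. Trimmed from the hunk above, the evaluation block reduces to roughly:

    info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
    if (!info) {
        status = AE_NO_MEMORY;
    } else {
        /* Evaluate the _Lxx/_Exx method that corresponds to this GPE */
        info->prefix_node = local_gpe_event_info.dispatch.method_node;
        info->parameters = ACPI_CAST_PTR(union acpi_operand_object *,
                                         gpe_event_info);
        info->parameter_type = ACPI_PARAM_GPE;
        info->flags = ACPI_IGNORE_RETURN_VALUE;

        status = acpi_ns_evaluate(info);
        ACPI_FREE(info);
    }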
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 0fd00b5ad65..95ddeb48bc0 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -131,14 +131,14 @@ u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
*
******************************************************************************/
-acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_walk_gpe_list");
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -146,10 +146,12 @@ acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback)
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_info) {
+
/* Walk all Gpe Blocks attached to this interrupt level */
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
+
/* One callback per GPE block */
status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
@@ -190,11 +192,12 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_native_uint i;
acpi_native_uint j;
- ACPI_FUNCTION_TRACE("ev_delete_gpe_handlers");
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
@@ -204,7 +207,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
- ACPI_MEM_FREE(gpe_event_info->dispatch.handler);
+ ACPI_FREE(gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
@@ -248,7 +251,7 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
u8 type;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_save_method_info");
+ ACPI_FUNCTION_TRACE(ev_save_method_info);
/*
* _Lxx and _Exx GPE method support
@@ -279,9 +282,9 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
default:
/* Unknown method type, just ignore it! */
- ACPI_ERROR((AE_INFO,
- "Unknown GPE method type: %s (name not of form _Lxx or _Exx)",
- name));
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
+ name));
return_ACPI_STATUS(AE_OK);
}
@@ -289,11 +292,12 @@ acpi_ev_save_method_info(acpi_handle obj_handle,
gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
if (gpe_number == ACPI_UINT32_MAX) {
+
/* Conversion failed; invalid method, just ignore it */
- ACPI_ERROR((AE_INFO,
- "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
- name));
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
+ name));
return_ACPI_STATUS(AE_OK);
}
@@ -364,13 +368,14 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
u32 gpe_number;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_match_prw_and_gpe");
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
/* Check for a _PRW method under this device */
status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
ACPI_BTYPE_PACKAGE, &pkg_desc);
if (ACPI_FAILURE(status)) {
+
/* Ignore all errors from _PRW, we don't want to abort the subsystem */
return_ACPI_STATUS(AE_OK);
@@ -394,6 +399,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
obj_desc = pkg_desc->package.elements[0];
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Use FADT-defined GPE device (from definition of _PRW) */
target_gpe_device = acpi_gbl_fadt_gpe_device;
@@ -402,6 +408,7 @@ acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
gpe_number = (u32) obj_desc->integer.value;
} else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
/* Package contains a GPE reference and GPE number within a GPE block */
if ((obj_desc->package.count < 2) ||
@@ -482,7 +489,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_get_gpe_xrupt_block");
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
/* No need for lock since we are not changing any list elements here */
@@ -497,7 +504,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
/* Not found, must allocate a new xrupt descriptor */
- gpe_xrupt = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_xrupt_info));
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
return_PTR(NULL);
}
@@ -556,7 +563,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_delete_gpe_xrupt");
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
/* We never want to remove the SCI interrupt handler */
@@ -588,7 +595,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* Free the block */
- ACPI_MEM_FREE(gpe_xrupt);
+ ACPI_FREE(gpe_xrupt);
return_ACPI_STATUS(AE_OK);
}
@@ -614,7 +621,7 @@ acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_install_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_install_gpe_block);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -667,7 +674,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("ev_install_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_install_gpe_block);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -679,6 +686,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
if (!gpe_block->previous && !gpe_block->next) {
+
/* This is the last gpe_block on this interrupt */
status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
@@ -704,9 +712,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
/* Free the gpe_block */
- ACPI_MEM_FREE(gpe_block->register_info);
- ACPI_MEM_FREE(gpe_block->event_info);
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block);
unlock_and_exit:
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
@@ -736,17 +744,17 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
acpi_native_uint j;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_create_gpe_info_blocks");
+ ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
/* Allocate the GPE register information block */
- gpe_register_info = ACPI_MEM_CALLOCATE((acpi_size) gpe_block->
- register_count *
- sizeof(struct
- acpi_gpe_register_info));
+ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+ register_count *
+ sizeof(struct
+ acpi_gpe_register_info));
if (!gpe_register_info) {
ACPI_ERROR((AE_INFO,
- "Could not allocate the gpe_register_info table"));
+ "Could not allocate the GpeRegisterInfo table"));
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -754,13 +762,14 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_MEM_CALLOCATE(((acpi_size) gpe_block->
- register_count *
- ACPI_GPE_REGISTER_WIDTH) *
- sizeof(struct acpi_gpe_event_info));
+ gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
+ register_count *
+ ACPI_GPE_REGISTER_WIDTH) *
+ sizeof(struct
+ acpi_gpe_event_info));
if (!gpe_event_info) {
ACPI_ERROR((AE_INFO,
- "Could not allocate the gpe_event_info table"));
+ "Could not allocate the GpeEventInfo table"));
status = AE_NO_MEMORY;
goto error_exit;
}
@@ -780,6 +789,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
this_event = gpe_event_info;
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Init the register_info for this GPE register (8 GPEs) */
this_register->base_gpe_number =
@@ -839,10 +849,10 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
error_exit:
if (gpe_register_info) {
- ACPI_MEM_FREE(gpe_register_info);
+ ACPI_FREE(gpe_register_info);
}
if (gpe_event_info) {
- ACPI_MEM_FREE(gpe_event_info);
+ ACPI_FREE(gpe_event_info);
}
return_ACPI_STATUS(status);
@@ -878,7 +888,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_status status;
struct acpi_gpe_block_info *gpe_block;
- ACPI_FUNCTION_TRACE("ev_create_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_create_gpe_block);
if (!register_count) {
return_ACPI_STATUS(AE_OK);
@@ -886,7 +896,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
/* Allocate a new GPE block */
- gpe_block = ACPI_MEM_CALLOCATE(sizeof(struct acpi_gpe_block_info));
+ gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
if (!gpe_block) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -906,7 +916,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
*/
status = acpi_ev_create_gpe_info_blocks(gpe_block);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block);
return_ACPI_STATUS(status);
}
@@ -914,7 +924,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block);
return_ACPI_STATUS(status);
}
@@ -971,7 +981,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
acpi_native_uint i;
acpi_native_uint j;
- ACPI_FUNCTION_TRACE("ev_initialize_gpe_block");
+ ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
@@ -1013,6 +1023,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
for (i = 0; i < gpe_block->register_count; i++) {
for (j = 0; j < 8; j++) {
+
/* Get the info block for this particular GPE */
gpe_event_info =
@@ -1040,7 +1051,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO, "Could not enable GPEs in gpe_block %p",
+ ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
gpe_block));
}
@@ -1066,7 +1077,7 @@ acpi_status acpi_ev_gpe_initialize(void)
u32 gpe_number_max = 0;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_gpe_initialize");
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -1099,6 +1110,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* particular block is not supported.
*/
if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) {
+
/* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2);
@@ -1121,6 +1133,7 @@ acpi_status acpi_ev_gpe_initialize(void)
}
if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) {
+
/* GPE block 1 exists (has both length and address > 0) */
register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2);
@@ -1168,6 +1181,7 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Exit if there are no GPE registers */
if ((register_count0 + register_count1) == 0) {
+
/* GPEs are not required by ACPI, this is OK */
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
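acpi_ev_save_method_info(), patched above to log unknown names at ACPI_DB_LOAD debug level instead of as errors, only accepts methods named _Lxx (level triggered) or _Exx (edge triggered), where xx is the GPE number in hex. A small standalone illustration of that naming convention, with hypothetical helper names (the real code uses ACPI_STRTOUL and also rejects a failed conversion):

    #include <stdio.h>
    #include <stdlib.h>

    /* Return 1 and set *gpe if name looks like _Lxx or _Exx, else 0. */
    static int parse_gpe_method_name(const char *name, unsigned long *gpe)
    {
        if (name[0] != '_' || (name[1] != 'L' && name[1] != 'E')) {
            return 0;    /* not a GPE method; silently ignored */
        }
        *gpe = strtoul(&name[2], NULL, 16);
        return 1;
    }

    int main(void)
    {
        unsigned long gpe;

        if (parse_gpe_method_name("_L1A", &gpe)) {
            printf("GPE 0x%02lX\n", gpe);    /* GPE 0x1A */
        }
        return 0;
    }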
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index 0909ba69577..6eef4efddcf 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -49,12 +49,13 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")
+/* Names for Notify() values, used for debug output */
#ifdef ACPI_DEBUG_OUTPUT
static const char *acpi_notify_value_names[] = {
"Bus Check",
"Device Check",
"Device Wake",
- "Eject request",
+ "Eject Request",
"Device Check Light",
"Frequency Mismatch",
"Bus Mode Mismatch",
@@ -124,7 +125,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
union acpi_generic_state *notify_info;
acpi_status status = AE_OK;
- ACPI_FUNCTION_NAME("ev_queue_notify_request");
+ ACPI_FUNCTION_NAME(ev_queue_notify_request);
/*
* For value 3 (Ejection Request), some device method may need to be run.
@@ -150,6 +151,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
+
/* We have the notify object, Get the right handler */
switch (node->type) {
@@ -184,14 +186,15 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
return (AE_NO_MEMORY);
}
- notify_info->common.data_type = ACPI_DESC_TYPE_STATE_NOTIFY;
+ notify_info->common.descriptor_type =
+ ACPI_DESC_TYPE_STATE_NOTIFY;
notify_info->notify.node = node;
notify_info->notify.value = (u16) notify_value;
notify_info->notify.handler_obj = handler_obj;
- status = acpi_os_queue_for_execution(OSD_PRIORITY_HIGH,
- acpi_ev_notify_dispatch,
- notify_info);
+ status =
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+ notify_info);
if (ACPI_FAILURE(status)) {
acpi_ut_delete_generic_state(notify_info);
}
@@ -240,6 +243,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
* to the device.
*/
if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
+
/* Global system notification handler */
if (acpi_gbl_system_notify.handler) {
@@ -297,6 +301,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
/* Signal threads that are waiting for the lock */
if (acpi_gbl_global_lock_thread_count) {
+
/* Send sufficient units to the semaphore */
status =
@@ -335,15 +340,16 @@ static u32 acpi_ev_global_lock_handler(void *context)
*/
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
if (acquired) {
+
/* Got the lock, now wake all threads waiting for it */
acpi_gbl_global_lock_acquired = TRUE;
/* Run the Global Lock thread which will signal all waiting threads */
- status = acpi_os_queue_for_execution(OSD_PRIORITY_HIGH,
- acpi_ev_global_lock_thread,
- context);
+ status =
+ acpi_os_execute(OSL_GLOBAL_LOCK_HANDLER,
+ acpi_ev_global_lock_thread, context);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not queue Global Lock thread"));
@@ -371,7 +377,7 @@ acpi_status acpi_ev_init_global_lock_handler(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_init_global_lock_handler");
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
acpi_gbl_global_lock_present = TRUE;
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
@@ -413,7 +419,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
acpi_status status = AE_OK;
u8 acquired = FALSE;
- ACPI_FUNCTION_TRACE("ev_acquire_global_lock");
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
#ifndef ACPI_APPLICATION
/* Make sure that we actually have a global lock */
@@ -439,6 +445,7 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired);
if (acquired) {
+
/* We got the lock */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -458,8 +465,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
* Acquire the global lock semaphore first.
* Since this wait will block, we must release the interpreter
*/
- status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
- timeout);
+ status =
+ acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
+ timeout);
return_ACPI_STATUS(status);
}
@@ -480,7 +488,7 @@ acpi_status acpi_ev_release_global_lock(void)
u8 pending = FALSE;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ev_release_global_lock");
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
if (!acpi_gbl_global_lock_thread_count) {
ACPI_WARNING((AE_INFO,
@@ -492,6 +500,7 @@ acpi_status acpi_ev_release_global_lock(void)
acpi_gbl_global_lock_thread_count--;
if (acpi_gbl_global_lock_thread_count) {
+
/* There are still some threads holding the lock, cannot release */
return_ACPI_STATUS(AE_OK);
@@ -533,7 +542,7 @@ void acpi_ev_terminate(void)
acpi_native_uint i;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_terminate");
+ ACPI_FUNCTION_TRACE(ev_terminate);
if (acpi_gbl_events_initialized) {
/*
@@ -573,7 +582,7 @@ void acpi_ev_terminate(void)
if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
status = acpi_disable();
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "acpi_disable failed"));
+ ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
}
}
return_VOID;
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 6da58e77641..094a17e4c86 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -83,7 +83,7 @@ acpi_status acpi_ev_install_region_handlers(void)
acpi_status status;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ev_install_region_handlers");
+ ACPI_FUNCTION_TRACE(ev_install_region_handlers);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -153,7 +153,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_status status;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ev_initialize_op_regions");
+ ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -164,6 +164,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
* Run the _REG methods for op_regions in each default address space
*/
for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+
/* TBD: Make sure handler is the DEFAULT handler, otherwise
* _REG will have already been run.
*/
@@ -192,12 +193,12 @@ acpi_status acpi_ev_initialize_op_regions(void)
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
- struct acpi_parameter_info info;
- union acpi_operand_object *params[3];
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
union acpi_operand_object *region_obj2;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_execute_reg_method");
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@@ -208,48 +209,60 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
return_ACPI_STATUS(AE_OK);
}
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = region_obj2->extra.method_REG;
+ info->pathname = NULL;
+ info->parameters = args;
+ info->parameter_type = ACPI_PARAM_ARGS;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
/*
* The _REG method has two arguments:
*
- * Arg0, Integer: Operation region space ID
- * Same value as region_obj->Region.space_id
- * Arg1, Integer: connection status
- * 1 for connecting the handler,
- * 0 for disconnecting the handler
- * Passed as a parameter
+ * Arg0 - Integer:
+ * Operation region space ID. Same value as region_obj->Region.space_id
+ *
+ * Arg1 - Integer:
+ * Connection status: 1 for connecting the handler, 0 for disconnecting
+ * the handler (Passed as a parameter)
*/
- params[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
- if (!params[0]) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[0]) {
+ status = AE_NO_MEMORY;
+ goto cleanup1;
}
- params[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
- if (!params[1]) {
+ args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[1]) {
status = AE_NO_MEMORY;
- goto cleanup;
+ goto cleanup2;
}
/* Setup the parameter objects */
- params[0]->integer.value = region_obj->region.space_id;
- params[1]->integer.value = function;
- params[2] = NULL;
-
- info.node = region_obj2->extra.method_REG;
- info.parameters = params;
- info.parameter_type = ACPI_PARAM_ARGS;
+ args[0]->integer.value = region_obj->region.space_id;
+ args[1]->integer.value = function;
+ args[2] = NULL;
/* Execute the method, no return value */
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_METHOD, info.node, NULL));
- status = acpi_ns_evaluate_by_handle(&info);
+ (ACPI_TYPE_METHOD, info->prefix_node, NULL));
- acpi_ut_remove_reference(params[1]);
+ status = acpi_ns_evaluate(info);
+ acpi_ut_remove_reference(args[1]);
- cleanup:
- acpi_ut_remove_reference(params[0]);
+ cleanup2:
+ acpi_ut_remove_reference(args[0]);
+ cleanup1:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
@@ -261,7 +274,8 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* Function - Read or Write operation
* Address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
- * Value - Pointer to in or out value
+ * Value - Pointer to in or out value, must be
+ * full 64-bit acpi_integer
*
* RETURN: Status
*
@@ -274,7 +288,7 @@ acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
acpi_physical_address address,
- u32 bit_width, void *value)
+ u32 bit_width, acpi_integer * value)
{
acpi_status status;
acpi_status status2;
@@ -284,7 +298,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
void *region_context = NULL;
- ACPI_FUNCTION_TRACE("ev_address_space_dispatch");
+ ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@@ -315,6 +329,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
*/
region_setup = handler_desc->address_space.setup;
if (!region_setup) {
+
/* No initialization routine, exit with error */
ACPI_ERROR((AE_INFO,
@@ -361,9 +376,10 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
if (region_obj2->extra.region_context) {
+
/* The handler for this region was already installed */
- ACPI_MEM_FREE(region_context);
+ ACPI_FREE(region_context);
} else {
/*
* Save the returned context for use in all accesses to
@@ -386,9 +402,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
- if (!
- (handler_desc->address_space.
- hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* For handlers other than the default (supplied) handlers, we must
* exit the interpreter because the handler *might* block -- we don't
@@ -409,9 +424,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
space_id)));
}
- if (!
- (handler_desc->address_space.
- hflags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
* We just returned from a non-default handler, we must re-enter the
* interpreter
@@ -451,7 +465,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
union acpi_operand_object *region_obj2;
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_detach_region");
+ ACPI_FUNCTION_TRACE(ev_detach_region);
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
@@ -463,6 +477,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
handler_obj = region_obj->region.handler;
if (!handler_obj) {
+
/* This region has no handler, all done */
return_VOID;
@@ -474,6 +489,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
last_obj_ptr = &handler_obj->address_space.region_list;
while (obj_desc) {
+
/* Is this the correct Region? */
if (obj_desc == region_obj) {
@@ -583,7 +599,7 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
u8 acpi_ns_is_locked)
{
- ACPI_FUNCTION_TRACE("ev_attach_region");
+ ACPI_FUNCTION_TRACE(ev_attach_region);
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Adding Region [%4.4s] %p to address handler %p [%s]\n",
@@ -636,7 +652,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_NAME("ev_install_handler");
+ ACPI_FUNCTION_NAME(ev_install_handler);
handler_obj = (union acpi_operand_object *)context;
@@ -666,6 +682,7 @@ acpi_ev_install_handler(acpi_handle obj_handle,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, just exit */
return (AE_OK);
@@ -674,10 +691,12 @@ acpi_ev_install_handler(acpi_handle obj_handle,
/* Devices are handled different than regions */
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
+
/* Check if this Device already has a handler for this address space */
next_handler_obj = obj_desc->device.handler;
while (next_handler_obj) {
+
/* Found a handler, is it for the same address space? */
if (next_handler_obj->address_space.space_id ==
@@ -764,9 +783,9 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
union acpi_operand_object *handler_obj;
acpi_status status;
acpi_object_type type;
- u16 flags = 0;
+ u8 flags = 0;
- ACPI_FUNCTION_TRACE("ev_install_space_handler");
+ ACPI_FUNCTION_TRACE(ev_install_space_handler);
/*
* This registration is valid for only the types below
@@ -839,6 +858,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
/* Walk the handler list for this device */
while (handler_obj) {
+
/* Same space_id indicates a handler already installed */
if (handler_obj->address_space.space_id == space_id) {
@@ -921,7 +941,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
/* Init handler obj */
handler_obj->address_space.space_id = (u8) space_id;
- handler_obj->address_space.hflags = flags;
+ handler_obj->address_space.handler_flags = flags;
handler_obj->address_space.region_list = NULL;
handler_obj->address_space.node = node;
handler_obj->address_space.handler = handler;
@@ -979,7 +999,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_execute_reg_methods");
+ ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
/*
* Run all _REG methods for all Operation Regions for this
@@ -1001,7 +1021,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
*
* PARAMETERS: walk_namespace callback
*
- * DESCRIPTION: Run _REg method for region objects of the requested space_iD
+ * DESCRIPTION: Run _REG method for region objects of the requested space_iD
*
******************************************************************************/
@@ -1035,6 +1055,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, just exit */
return (AE_OK);
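acpi_ev_execute_reg_method() above likewise builds a heap-allocated struct acpi_evaluate_info; _REG always takes two integer arguments, the region's address-space ID and a connect/disconnect flag. Condensed from the hunk (object-creation failure handling and reference cleanup omitted), the argument setup is roughly:

    info->prefix_node = region_obj2->extra.method_REG;
    info->parameters = args;
    info->parameter_type = ACPI_PARAM_ARGS;
    info->flags = ACPI_IGNORE_RETURN_VALUE;

    args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
    args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
    args[2] = NULL;                         /* argument list is NULL terminated */

    args[0]->integer.value = region_obj->region.space_id;  /* Arg0: space ID */
    args[1]->integer.value = function;      /* Arg1: 1 = connect, 0 = disconnect */

    status = acpi_ns_evaluate(info);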
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index baed8c1a1b9..5b3c7a85eb9 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -71,11 +71,22 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
(union acpi_operand_object *)handle;
struct acpi_mem_space_context *local_region_context;
- ACPI_FUNCTION_TRACE("ev_system_memory_region_setup");
+ ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
if (*region_context) {
- ACPI_MEM_FREE(*region_context);
+ local_region_context =
+ (struct acpi_mem_space_context *)*region_context;
+
+ /* Delete a cached mapping if present */
+
+ if (local_region_context->mapped_length) {
+ acpi_os_unmap_memory(local_region_context->
+ mapped_logical_address,
+ local_region_context->
+ mapped_length);
+ }
+ ACPI_FREE(local_region_context);
*region_context = NULL;
}
return_ACPI_STATUS(AE_OK);
@@ -84,7 +95,7 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
/* Create a new context */
local_region_context =
- ACPI_MEM_CALLOCATE(sizeof(struct acpi_mem_space_context));
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
if (!(local_region_context)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -118,7 +129,7 @@ acpi_ev_io_space_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_io_space_region_setup");
+ ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
*region_context = NULL;
@@ -161,7 +172,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
(union acpi_operand_object *)handle;
struct acpi_device_id object_hID;
- ACPI_FUNCTION_TRACE("ev_pci_config_region_setup");
+ ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
handler_obj = region_obj->region.handler;
if (!handler_obj) {
@@ -178,7 +189,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*region_context = NULL;
if (function == ACPI_REGION_DEACTIVATE) {
if (pci_id) {
- ACPI_MEM_FREE(pci_id);
+ ACPI_FREE(pci_id);
}
return_ACPI_STATUS(status);
}
@@ -199,6 +210,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
* handlers with that device.
*/
if (handler_obj->address_space.node == acpi_gbl_root_node) {
+
/* Start search from the parent object */
pci_root_node = parent_node;
@@ -220,6 +232,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))))
{
+
/* Install a handler for this PCI root bridge */
status =
@@ -235,7 +248,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
} else {
ACPI_EXCEPTION((AE_INFO,
status,
- "Could not install pci_config handler for Root Bridge %4.4s",
+ "Could not install PciConfig handler for Root Bridge %4.4s",
acpi_ut_get_node_name
(pci_root_node)));
}
@@ -262,7 +275,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Region is still not initialized. Create a new context */
- pci_id = ACPI_MEM_CALLOCATE(sizeof(struct acpi_pci_id));
+ pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
if (!pci_id) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -337,7 +350,7 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_pci_bar_region_setup");
+ ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
return_ACPI_STATUS(AE_OK);
}
@@ -364,7 +377,7 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_cmos_region_setup");
+ ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
return_ACPI_STATUS(AE_OK);
}
@@ -389,7 +402,7 @@ acpi_ev_default_region_setup(acpi_handle handle,
u32 function,
void *handler_context, void **region_context)
{
- ACPI_FUNCTION_TRACE("ev_default_region_setup");
+ ACPI_FUNCTION_TRACE(ev_default_region_setup);
if (function == ACPI_REGION_DEACTIVATE) {
*region_context = NULL;
@@ -435,7 +448,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
union acpi_operand_object *region_obj2;
- ACPI_FUNCTION_TRACE_U32("ev_initialize_region", acpi_ns_locked);
+ ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
if (!region_obj) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -462,8 +475,9 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* Find any "_REG" method associated with this region definition */
- status = acpi_ns_search_node(*reg_name_ptr, node,
- ACPI_TYPE_METHOD, &method_node);
+ status =
+ acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+ &method_node);
if (ACPI_SUCCESS(status)) {
/*
* The _REG method is optional and there can be only one per region
@@ -478,11 +492,13 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
* ie: acpi_gbl_root_node->parent_entry being set to NULL
*/
while (node) {
+
/* Check to see if a handler exists */
handler_obj = NULL;
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
+
/* Can only be a handler if the object exists */
switch (node->type) {
@@ -507,10 +523,12 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
while (handler_obj) {
+
/* Is this handler of the correct type? */
if (handler_obj->address_space.space_id ==
space_id) {
+
/* Found correct handler */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
@@ -571,7 +589,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
/* If we get here, there is no handler for this region */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "No handler for region_type %s(%X) (region_obj %p)\n",
+ "No handler for RegionType %s(%X) (RegionObj %p)\n",
acpi_ut_get_region_name(space_id), space_id,
region_obj));
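The SystemMemory deactivate path above no longer just frees the region context: it first releases any cached virtual mapping recorded in that context, then frees the context itself. A hedged sketch of that teardown order, with simplified stand-in types instead of the ACPICA structures:

#include <stdlib.h>

struct mem_ctx {
        void   *mapped_logical_address;
        size_t  mapped_length;
};

/* Stub for the platform unmap call -- an assumption, not a real API. */
static void unmap_memory(void *addr, size_t len)
{
        (void)addr;
        (void)len;
}

static void region_deactivate(struct mem_ctx **ctx)
{
        struct mem_ctx *c = *ctx;

        if (!c)
                return;
        if (c->mapped_length)           /* drop the cached mapping first */
                unmap_memory(c->mapped_logical_address, c->mapped_length);
        free(c);                        /* then release the context */
        *ctx = NULL;
}

Freeing the context without unmapping first, as the old single ACPI_MEM_FREE call did, would leave any cached mapping in place.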
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 9a622169008..8106215ad55 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -69,7 +69,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
- ACPI_FUNCTION_TRACE("ev_sci_xrupt_handler");
+ ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
@@ -108,7 +108,7 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
- ACPI_FUNCTION_TRACE("ev_gpe_xrupt_handler");
+ ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
/*
* We are guaranteed by the ACPI CA initialization/shutdown code that
@@ -140,7 +140,7 @@ u32 acpi_ev_install_sci_handler(void)
{
u32 status = AE_OK;
- ACPI_FUNCTION_TRACE("ev_install_sci_handler");
+ ACPI_FUNCTION_TRACE(ev_install_sci_handler);
status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int,
acpi_ev_sci_xrupt_handler,
@@ -171,7 +171,7 @@ acpi_status acpi_ev_remove_sci_handler(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ev_remove_sci_handler");
+ ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
/* Just let the OS remove the handler and disable the level */
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index b38b39dde54..76c34a66e0e 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@@ -68,7 +66,7 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_exception_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -90,6 +88,8 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -107,14 +107,13 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
* event.
*
******************************************************************************/
-
acpi_status
acpi_install_fixed_event_handler(u32 event,
acpi_event_handler handler, void *context)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_fixed_event_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
/* Parameter validation */
@@ -161,7 +160,7 @@ acpi_install_fixed_event_handler(u32 event,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_fixed_event_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
/*******************************************************************************
*
@@ -175,13 +174,12 @@ EXPORT_SYMBOL(acpi_install_fixed_event_handler);
* DESCRIPTION: Disables the event and unregisters the event handler.
*
******************************************************************************/
-
acpi_status
acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_remove_fixed_event_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
/* Parameter validation */
@@ -216,7 +214,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_fixed_event_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
@@ -235,7 +233,6 @@ EXPORT_SYMBOL(acpi_remove_fixed_event_handler);
* DESCRIPTION: Install a handler for notifies on an ACPI device
*
******************************************************************************/
-
acpi_status
acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
@@ -246,7 +243,7 @@ acpi_install_notify_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_notify_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
/* Parameter validation */
@@ -275,6 +272,7 @@ acpi_install_notify_handler(acpi_handle device,
* only one <external> global handler can be registered (per notify type).
*/
if (device == ACPI_ROOT_OBJECT) {
+
/* Make sure the handler is not already installed */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
@@ -317,6 +315,7 @@ acpi_install_notify_handler(acpi_handle device,
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
+
/* Object exists - make sure there's no handler */
if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
@@ -370,6 +369,7 @@ acpi_install_notify_handler(acpi_handle device,
}
if (handler_type == ACPI_ALL_NOTIFY) {
+
/* Extra ref if installed in both */
acpi_ut_add_reference(notify_obj);
@@ -381,7 +381,7 @@ acpi_install_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_notify_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
/*******************************************************************************
*
@@ -399,7 +399,6 @@ EXPORT_SYMBOL(acpi_install_notify_handler);
* DESCRIPTION: Remove a handler for notifies on an ACPI device
*
******************************************************************************/
-
acpi_status
acpi_remove_notify_handler(acpi_handle device,
u32 handler_type, acpi_notify_handler handler)
@@ -409,7 +408,7 @@ acpi_remove_notify_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_remove_notify_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
/* Parameter validation */
@@ -535,7 +534,7 @@ acpi_remove_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_notify_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
/*******************************************************************************
*
@@ -554,7 +553,6 @@ EXPORT_SYMBOL(acpi_remove_notify_handler);
* DESCRIPTION: Install a handler for a General Purpose Event.
*
******************************************************************************/
-
acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
u32 gpe_number,
@@ -565,7 +563,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("acpi_install_gpe_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
/* Parameter validation */
@@ -596,7 +594,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
/* Allocate and init handler object */
- handler = ACPI_MEM_CALLOCATE(sizeof(struct acpi_handler_info));
+ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
if (!handler) {
status = AE_NO_MEMORY;
goto unlock_and_exit;
@@ -630,7 +628,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_gpe_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
/*******************************************************************************
*
@@ -646,7 +644,6 @@ EXPORT_SYMBOL(acpi_install_gpe_handler);
* DESCRIPTION: Remove a handler for a General Purpose acpi_event.
*
******************************************************************************/
-
acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
u32 gpe_number, acpi_event_handler address)
@@ -656,7 +653,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
acpi_status status;
acpi_cpu_flags flags;
- ACPI_FUNCTION_TRACE("acpi_remove_gpe_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
/* Parameter validation */
@@ -724,14 +721,14 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
/* Now we can free the handler object */
- ACPI_MEM_FREE(handler);
+ ACPI_FREE(handler);
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_gpe_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
/*******************************************************************************
*
@@ -746,7 +743,6 @@ EXPORT_SYMBOL(acpi_remove_gpe_handler);
* DESCRIPTION: Acquire the ACPI Global Lock
*
******************************************************************************/
-
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
acpi_status status;
@@ -771,7 +767,7 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
return (status);
}
-EXPORT_SYMBOL(acpi_acquire_global_lock);
+ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
/*******************************************************************************
*
@@ -784,7 +780,6 @@ EXPORT_SYMBOL(acpi_acquire_global_lock);
* DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
*
******************************************************************************/
-
acpi_status acpi_release_global_lock(u32 handle)
{
acpi_status status;
@@ -797,4 +792,4 @@ acpi_status acpi_release_global_lock(u32 handle)
return (status);
}
-EXPORT_SYMBOL(acpi_release_global_lock);
+ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
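From here on, the Linux-specific EXPORT_SYMBOL() calls and the <linux/module.h> include are replaced by an OS-neutral ACPI_EXPORT_SYMBOL() macro supplied by the host-specific ACPI headers. A rough sketch of how such a wrapper can be defined follows; the real definition may differ in detail, and the call sites drop the trailing semicolon, apparently because the macro supplies it on Linux.

#ifdef __KERNEL__
#include <linux/module.h>
#define ACPI_EXPORT_SYMBOL(symbol)      EXPORT_SYMBOL(symbol);
#else
#define ACPI_EXPORT_SYMBOL(symbol)      /* no export needed outside the kernel */
#endif

/* Hypothetical exported interface, used only to show the call style. */
int acpi_example_interface(void)
{
        return 0;
}

ACPI_EXPORT_SYMBOL(acpi_example_interface)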
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index ec9ce8429f1..7ebc2efac93 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
@@ -65,7 +63,7 @@ acpi_status acpi_enable(void)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_enable");
+ ACPI_FUNCTION_TRACE(acpi_enable);
/* Make sure we have the FADT */
@@ -94,6 +92,8 @@ acpi_status acpi_enable(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_enable)
+
/*******************************************************************************
*
* FUNCTION: acpi_disable
@@ -105,12 +105,11 @@ acpi_status acpi_enable(void)
* DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
*
******************************************************************************/
-
acpi_status acpi_disable(void)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_disable");
+ ACPI_FUNCTION_TRACE(acpi_disable);
if (!acpi_gbl_FADT) {
ACPI_WARNING((AE_INFO, "No FADT information present!"));
@@ -137,6 +136,8 @@ acpi_status acpi_disable(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_disable)
+
/*******************************************************************************
*
* FUNCTION: acpi_enable_event
@@ -149,13 +150,12 @@ acpi_status acpi_disable(void)
* DESCRIPTION: Enable an ACPI event (fixed)
*
******************************************************************************/
-
acpi_status acpi_enable_event(u32 event, u32 flags)
{
acpi_status status = AE_OK;
u32 value;
- ACPI_FUNCTION_TRACE("acpi_enable_event");
+ ACPI_FUNCTION_TRACE(acpi_enable_event);
/* Decode the Fixed Event */
@@ -193,7 +193,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_enable_event);
+ACPI_EXPORT_SYMBOL(acpi_enable_event)
/*******************************************************************************
*
@@ -208,13 +208,12 @@ EXPORT_SYMBOL(acpi_enable_event);
* DESCRIPTION: Set the type of an individual GPE
*
******************************************************************************/
-
acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_set_gpe_type");
+ ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
/* Ensure that we have a valid GPE number */
@@ -236,7 +235,7 @@ acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_set_gpe_type);
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
/*******************************************************************************
*
@@ -252,13 +251,12 @@ EXPORT_SYMBOL(acpi_set_gpe_type);
* DESCRIPTION: Enable an ACPI event (general purpose)
*
******************************************************************************/
-
acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_enable_gpe");
+ ACPI_FUNCTION_TRACE(acpi_enable_gpe);
/* Use semaphore lock if not executing at interrupt level */
@@ -288,7 +286,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_enable_gpe);
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
/*******************************************************************************
*
@@ -304,13 +302,12 @@ EXPORT_SYMBOL(acpi_enable_gpe);
* DESCRIPTION: Disable an ACPI event (general purpose)
*
******************************************************************************/
-
acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_disable_gpe");
+ ACPI_FUNCTION_TRACE(acpi_disable_gpe);
/* Use semaphore lock if not executing at interrupt level */
@@ -338,6 +335,8 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
/*******************************************************************************
*
* FUNCTION: acpi_disable_event
@@ -350,13 +349,12 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
* DESCRIPTION: Disable an ACPI event (fixed)
*
******************************************************************************/
-
acpi_status acpi_disable_event(u32 event, u32 flags)
{
acpi_status status = AE_OK;
u32 value;
- ACPI_FUNCTION_TRACE("acpi_disable_event");
+ ACPI_FUNCTION_TRACE(acpi_disable_event);
/* Decode the Fixed Event */
@@ -392,7 +390,7 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_disable_event);
+ACPI_EXPORT_SYMBOL(acpi_disable_event)
/*******************************************************************************
*
@@ -405,12 +403,11 @@ EXPORT_SYMBOL(acpi_disable_event);
* DESCRIPTION: Clear an ACPI event (fixed)
*
******************************************************************************/
-
acpi_status acpi_clear_event(u32 event)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_clear_event");
+ ACPI_FUNCTION_TRACE(acpi_clear_event);
/* Decode the Fixed Event */
@@ -429,7 +426,7 @@ acpi_status acpi_clear_event(u32 event)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_clear_event);
+ACPI_EXPORT_SYMBOL(acpi_clear_event)
/*******************************************************************************
*
@@ -444,13 +441,12 @@ EXPORT_SYMBOL(acpi_clear_event);
* DESCRIPTION: Clear an ACPI event (general purpose)
*
******************************************************************************/
-
acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
{
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_clear_gpe");
+ ACPI_FUNCTION_TRACE(acpi_clear_gpe);
/* Use semaphore lock if not executing at interrupt level */
@@ -478,6 +474,8 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -492,12 +490,11 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
* DESCRIPTION: Obtains and returns the current status of the event
*
******************************************************************************/
-
acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_get_event_status");
+ ACPI_FUNCTION_TRACE(acpi_get_event_status);
if (!event_status) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -518,6 +515,8 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_event_status)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_gpe_status
@@ -533,7 +532,6 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
* DESCRIPTION: Get status of an event (general purpose)
*
******************************************************************************/
-
acpi_status
acpi_get_gpe_status(acpi_handle gpe_device,
u32 gpe_number, u32 flags, acpi_event_status * event_status)
@@ -541,7 +539,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
acpi_status status = AE_OK;
struct acpi_gpe_event_info *gpe_event_info;
- ACPI_FUNCTION_TRACE("acpi_get_gpe_status");
+ ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
/* Use semaphore lock if not executing at interrupt level */
@@ -570,6 +568,8 @@ acpi_get_gpe_status(acpi_handle gpe_device,
}
return_ACPI_STATUS(status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -586,7 +586,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
* DESCRIPTION: Create and Install a block of GPE registers
*
******************************************************************************/
-
acpi_status
acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_generic_address *gpe_block_address,
@@ -597,7 +596,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
struct acpi_namespace_node *node;
struct acpi_gpe_block_info *gpe_block;
- ACPI_FUNCTION_TRACE("acpi_install_gpe_block");
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -636,6 +635,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, create a new one */
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
@@ -665,7 +665,7 @@ acpi_install_gpe_block(acpi_handle gpe_device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_gpe_block);
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
/*******************************************************************************
*
@@ -678,14 +678,13 @@ EXPORT_SYMBOL(acpi_install_gpe_block);
* DESCRIPTION: Remove a previously installed block of GPE registers
*
******************************************************************************/
-
acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
{
union acpi_operand_object *obj_desc;
acpi_status status;
struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_remove_gpe_block");
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
if (!gpe_device) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -721,4 +720,4 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_gpe_block);
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index abf5caca9ae..e8b86a0baad 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@@ -75,7 +73,7 @@ acpi_install_address_space_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_install_address_space_handler");
+ ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
/* Parameter validation */
@@ -114,7 +112,7 @@ acpi_install_address_space_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_install_address_space_handler);
+ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
/*******************************************************************************
*
@@ -129,7 +127,6 @@ EXPORT_SYMBOL(acpi_install_address_space_handler);
* DESCRIPTION: Remove a previously installed handler.
*
******************************************************************************/
-
acpi_status
acpi_remove_address_space_handler(acpi_handle device,
acpi_adr_space_type space_id,
@@ -142,7 +139,7 @@ acpi_remove_address_space_handler(acpi_handle device,
struct acpi_namespace_node *node;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_remove_address_space_handler");
+ ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
/* Parameter validation */
@@ -176,9 +173,11 @@ acpi_remove_address_space_handler(acpi_handle device,
handler_obj = obj_desc->device.handler;
last_obj_ptr = &obj_desc->device.handler;
while (handler_obj) {
+
/* We have a handler, see if user requested this one */
if (handler_obj->address_space.space_id == space_id) {
+
/* Matched space_id, first dereference this in the Regions */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
@@ -229,7 +228,7 @@ acpi_remove_address_space_handler(acpi_handle device,
/* The handler does not exist */
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
- "Unable to remove address handler %p for %s(%X), dev_node %p, obj %p\n",
+ "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
handler, acpi_ut_get_region_name(space_id), space_id,
node, obj_desc));
@@ -240,4 +239,4 @@ acpi_remove_address_space_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_remove_address_space_handler);
+ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index a29782fe3ec..823352435e0 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -82,7 +82,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
struct acpi_table_desc table_info;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ex_add_table");
+ ACPI_FUNCTION_TRACE(ex_add_table);
/* Create an object to be the table handle */
@@ -100,7 +100,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc));
- table_info.type = ACPI_TABLE_SSDT;
+ table_info.type = ACPI_TABLE_ID_SSDT;
table_info.pointer = table;
table_info.length = (acpi_size) table->length;
table_info.allocation = ACPI_MEM_ALLOCATED;
@@ -110,6 +110,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
+
/* Table already exists, just return the handle */
return_ACPI_STATUS(AE_OK);
@@ -121,6 +122,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
status = acpi_ns_load_table(table_info.installed_desc, parent_node);
if (ACPI_FAILURE(status)) {
+
/* Uninstall table on error */
(void)acpi_tb_uninstall_table(table_info.installed_desc);
@@ -160,7 +162,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *parameter_node = NULL;
union acpi_operand_object *ddb_handle;
- ACPI_FUNCTION_TRACE("ex_load_table_op");
+ ACPI_FUNCTION_TRACE(ex_load_table_op);
#if 0
/*
@@ -169,6 +171,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
*/
status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
if (status == AE_OK) {
+
/* Signature matched -- don't allow override */
return_ACPI_STATUS(AE_ALREADY_EXISTS);
@@ -211,9 +214,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
* location within the namespace where the table will be loaded.
*/
status =
- acpi_ns_get_node_by_path(operand[3]->string.pointer,
- start_node, ACPI_NS_SEARCH_PARENT,
- &parent_node);
+ acpi_ns_get_node(start_node, operand[3]->string.pointer,
+ ACPI_NS_SEARCH_PARENT, &parent_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -234,9 +236,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* Find the node referenced by the parameter_path_string */
status =
- acpi_ns_get_node_by_path(operand[4]->string.pointer,
- start_node, ACPI_NS_SEARCH_PARENT,
- &parameter_node);
+ acpi_ns_get_node(start_node, operand[4]->string.pointer,
+ ACPI_NS_SEARCH_PARENT, &parameter_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -252,6 +253,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
/* Parameter Data (optional) */
if (parameter_node) {
+
/* Store the parameter data into the optional parameter object */
status = acpi_ex_store(operand[5],
@@ -294,9 +296,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
struct acpi_table_header *table_ptr = NULL;
acpi_physical_address address;
struct acpi_table_header table_header;
+ acpi_integer temp;
u32 i;
- ACPI_FUNCTION_TRACE("ex_load_op");
+ ACPI_FUNCTION_TRACE(ex_load_op);
/* Object can be either an op_region or a Field */
@@ -322,7 +325,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
address = obj_desc->region.address;
- /* Get the table length from the table header */
+ /* Get part of the table header to get the table length */
table_header.length = 0;
for (i = 0; i < 8; i++) {
@@ -330,11 +333,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
(acpi_physical_address)
(i + address), 8,
- ((u8 *) &
- table_header) + i);
+ &temp);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+
+ /* Get the one valid byte of the returned 64-bit value */
+
+ ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
}
/* Sanity check the table length */
@@ -345,7 +351,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* Allocate a buffer for the entire table */
- table_ptr = ACPI_MEM_ALLOCATE(table_header.length);
+ table_ptr = ACPI_ALLOCATE(table_header.length);
if (!table_ptr) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -357,11 +363,14 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
(acpi_physical_address)
(i + address), 8,
- ((u8 *) table_ptr +
- i));
+ &temp);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
+
+ /* Get the one valid byte of the returned 64-bit value */
+
+ ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
}
break;
@@ -407,12 +416,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* The table must be either an SSDT or a PSDT */
- if ((!ACPI_STRNCMP(table_ptr->signature,
- acpi_gbl_table_data[ACPI_TABLE_PSDT].signature,
- acpi_gbl_table_data[ACPI_TABLE_PSDT].sig_length)) &&
- (!ACPI_STRNCMP(table_ptr->signature,
- acpi_gbl_table_data[ACPI_TABLE_SSDT].signature,
- acpi_gbl_table_data[ACPI_TABLE_SSDT].sig_length))) {
+ if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) &&
+ (!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) {
ACPI_ERROR((AE_INFO,
"Table has invalid signature [%4.4s], must be SSDT or PSDT",
table_ptr->signature));
@@ -424,6 +429,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
if (ACPI_FAILURE(status)) {
+
/* On error, table_ptr was deallocated above */
return_ACPI_STATUS(status);
@@ -442,7 +448,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
cleanup:
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(table_ptr);
+ ACPI_FREE(table_ptr);
}
return_ACPI_STATUS(status);
}
@@ -465,7 +471,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
union acpi_operand_object *table_desc = ddb_handle;
struct acpi_table_desc *table_info;
- ACPI_FUNCTION_TRACE("ex_unload_table");
+ ACPI_FUNCTION_TRACE(ex_unload_table);
/*
* Validate the handle
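In the ex_load_op hunks above, the region reads no longer cast a byte pointer into the dispatch call; each 8-bit read is returned in a 64-bit temporary and only its low byte is stored into the table buffer. A self-contained sketch of that pattern, where read_u64() is a hypothetical stand-in for acpi_ev_address_space_dispatch():

#include <stddef.h>
#include <stdint.h>

/* Hypothetical region-read callback: always returns a 64-bit value. */
static int read_u64(uint64_t address, uint32_t bit_width, uint64_t *value)
{
        (void)bit_width;
        *value = address & 0xFF;        /* pretend the region echoes the address */
        return 0;
}

static int read_bytes(uint64_t address, uint8_t *dest, size_t count)
{
        uint64_t temp;
        size_t i;

        for (i = 0; i < count; i++) {
                if (read_u64(address + i, 8, &temp) != 0)
                        return -1;
                dest[i] = (uint8_t)temp;        /* keep only the valid low byte */
        }
        return 0;
}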
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index e6d52e12d77..b732e399b1e 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -79,7 +79,7 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
u32 count;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_convert_to_integer", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_INTEGER:
@@ -199,7 +199,7 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
union acpi_operand_object *return_desc;
u8 *new_buf;
- ACPI_FUNCTION_TRACE_PTR("ex_convert_to_buffer", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_buffer, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_BUFFER:
@@ -319,6 +319,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
remainder = 0;
for (i = decimal_length; i > 0; i--) {
+
/* Divide by nth factor of 10 */
digit = integer;
@@ -346,6 +347,7 @@ acpi_ex_convert_to_ascii(acpi_integer integer,
hex_length = (acpi_native_uint) ACPI_MUL_2(data_width);
for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
+
/* Get one hex digit, most significant digits first */
string[k] =
@@ -400,7 +402,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
u16 base = 16;
u8 separator = ',';
- ACPI_FUNCTION_TRACE_PTR("ex_convert_to_string", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_convert_to_string, obj_desc);
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_STRING:
@@ -567,7 +569,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_convert_to_target_type");
+ ACPI_FUNCTION_TRACE(ex_convert_to_target_type);
/* Default behavior */
@@ -657,7 +659,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
default:
ACPI_ERROR((AE_INFO,
- "Unknown Target type ID 0x%X aml_opcode %X dest_type %s",
+ "Unknown Target type ID 0x%X AmlOpcode %X DestType %s",
GET_CURRENT_ARG_TYPE(walk_state->op_info->
runtime_args),
walk_state->opcode,
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 68057540283..106dc7219df 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -69,7 +69,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
struct acpi_namespace_node *alias_node;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_create_alias");
+ ACPI_FUNCTION_TRACE(ex_create_alias);
/* Get the source/alias operands (both namespace nodes) */
@@ -164,7 +164,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state)
acpi_status status;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ex_create_event");
+ ACPI_FUNCTION_TRACE(ex_create_event);
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_EVENT);
if (!obj_desc) {
@@ -216,7 +216,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_create_mutex", ACPI_WALK_OPERANDS);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_mutex, ACPI_WALK_OPERANDS);
/* Create the new mutex object */
@@ -243,8 +243,9 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
obj_desc->mutex.node =
(struct acpi_namespace_node *)walk_state->operands[0];
- status = acpi_ns_attach_object(obj_desc->mutex.node,
- obj_desc, ACPI_TYPE_MUTEX);
+ status =
+ acpi_ns_attach_object(obj_desc->mutex.node, obj_desc,
+ ACPI_TYPE_MUTEX);
cleanup:
/*
@@ -280,7 +281,7 @@ acpi_ex_create_region(u8 * aml_start,
struct acpi_namespace_node *node;
union acpi_operand_object *region_obj2;
- ACPI_FUNCTION_TRACE("ex_create_region");
+ ACPI_FUNCTION_TRACE(ex_create_region);
/* Get the Namespace Node */
@@ -300,7 +301,7 @@ acpi_ex_create_region(u8 * aml_start,
*/
if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
(region_space < ACPI_USER_REGION_BEGIN)) {
- ACPI_ERROR((AE_INFO, "Invalid address_space type %X",
+ ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X",
region_space));
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
@@ -364,7 +365,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
struct acpi_table_header *table;
union acpi_operand_object *region_obj2;
- ACPI_FUNCTION_TRACE("ex_create_table_region");
+ ACPI_FUNCTION_TRACE(ex_create_table_region);
/* Get the Node from the object stack */
@@ -452,7 +453,7 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_create_processor", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_processor, walk_state);
/* Create the processor object */
@@ -464,9 +465,9 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
/* Initialize the processor object from the operands */
obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
+ obj_desc->processor.length = (u8) operand[3]->integer.value;
obj_desc->processor.address =
(acpi_io_address) operand[2]->integer.value;
- obj_desc->processor.length = (u8) operand[3]->integer.value;
/* Install the processor object in the parent Node */
@@ -499,7 +500,7 @@ acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
acpi_status status;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_create_power_resource", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_power_resource, walk_state);
/* Create the power resource object */
@@ -549,7 +550,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_status status;
u8 method_flags;
- ACPI_FUNCTION_TRACE_PTR("ex_create_method", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ex_create_method, walk_state);
/* Create a new method object */
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index a7cca8d4f85..7b9718e976b 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -61,6 +61,10 @@ static void acpi_ex_out_pointer(char *title, void *value);
static void acpi_ex_out_address(char *title, acpi_physical_address value);
+static void
+acpi_ex_dump_object(union acpi_operand_object *obj_desc,
+ struct acpi_exdump_info *info);
+
static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc);
static void
@@ -119,7 +123,7 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
static struct acpi_exdump_info acpi_ex_dump_method[8] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
- {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "param_count"},
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
@@ -263,12 +267,10 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
};
-static struct acpi_exdump_info acpi_ex_dump_node[6] = {
+static struct acpi_exdump_info acpi_ex_dump_node[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
{ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
- {ACPI_EXD_UINT16, ACPI_EXD_NSOFFSET(reference_count),
- "Reference Count"},
{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
{ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
};
@@ -330,7 +332,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
if (!info) {
acpi_os_printf
- ("ex_dump_object: Display not implemented for object type %s\n",
+ ("ExDumpObject: Display not implemented for object type %s\n",
acpi_ut_get_object_type_name(obj_desc));
return;
}
@@ -454,7 +456,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME("ex_dump_operand")
+ ACPI_FUNCTION_NAME(ex_dump_operand)
if (!
((ACPI_LV_EXEC & acpi_dbg_level)
@@ -463,6 +465,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
}
if (!obj_desc) {
+
/* This could be a null element of a package */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Null Object Descriptor\n"));
@@ -522,7 +525,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case AML_REF_OF_OP:
- acpi_os_printf("Reference: (ref_of) %p\n",
+ acpi_os_printf("Reference: (RefOf) %p\n",
obj_desc->reference.object);
break;
@@ -532,6 +535,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
obj_desc->reference.offset);
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Value is an Integer */
acpi_os_printf(" value is [%8.8X%8.8x]",
@@ -610,7 +614,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_PACKAGE:
- acpi_os_printf("Package [Len %X] element_array %p\n",
+ acpi_os_printf("Package [Len %X] ElementArray %p\n",
obj_desc->package.count,
obj_desc->package.elements);
@@ -662,13 +666,13 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_LOCAL_BANK_FIELD:
- acpi_os_printf("bank_field\n");
+ acpi_os_printf("BankField\n");
break;
case ACPI_TYPE_LOCAL_REGION_FIELD:
acpi_os_printf
- ("region_field: Bits=%X acc_width=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
+ ("RegionField: Bits=%X AccWidth=%X Lock=%X Update=%X at byte=%X bit=%X of below:\n",
obj_desc->field.bit_length,
obj_desc->field.access_byte_width,
obj_desc->field.field_flags & AML_FIELD_LOCK_RULE_MASK,
@@ -681,12 +685,12 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
case ACPI_TYPE_LOCAL_INDEX_FIELD:
- acpi_os_printf("index_field\n");
+ acpi_os_printf("IndexField\n");
break;
case ACPI_TYPE_BUFFER_FIELD:
- acpi_os_printf("buffer_field: %X bits at byte %X bit %X of\n",
+ acpi_os_printf("BufferField: %X bits at byte %X bit %X of\n",
obj_desc->buffer_field.bit_length,
obj_desc->buffer_field.base_byte_offset,
obj_desc->buffer_field.start_field_bit_offset);
@@ -777,7 +781,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
{
acpi_native_uint i;
- ACPI_FUNCTION_NAME("ex_dump_operands");
+ ACPI_FUNCTION_NAME(ex_dump_operands);
if (!ident) {
ident = "?";
@@ -901,7 +905,7 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
acpi_os_printf("Could not convert name to pathname\n");
} else {
acpi_os_printf("%s\n", (char *)ret_buf.pointer);
- ACPI_MEM_FREE(ret_buf.pointer);
+ ACPI_FREE(ret_buf.pointer);
}
} else if (obj_desc->reference.object) {
acpi_os_printf("\nReferenced Object: %p\n",
@@ -1017,7 +1021,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
void
acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
{
- ACPI_FUNCTION_TRACE("ex_dump_object_descriptor");
+ ACPI_FUNCTION_TRACE(ex_dump_object_descriptor);
if (!obj_desc) {
return_VOID;
@@ -1046,7 +1050,7 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
acpi_os_printf
- ("ex_dump_object_descriptor: %p is not an ACPI operand object: [%s]\n",
+ ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
obj_desc, acpi_ut_get_descriptor_name(obj_desc));
return_VOID;
}
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
index e259201ce9a..9ea9c3a67ca 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/executer/exfield.c
@@ -73,7 +73,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
void *buffer;
u8 locked;
- ACPI_FUNCTION_TRACE_PTR("ex_read_data_from_field", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
/* Parameter validation */
@@ -142,6 +142,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
length =
(acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
if (length > acpi_gbl_integer_byte_width) {
+
/* Field is too large for an Integer, create a Buffer instead */
buffer_desc = acpi_ut_create_buffer_object(length);
@@ -163,11 +164,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_read [TO]: Obj %p, Type %X, Buf %p, byte_len %X\n",
+ "FieldRead [TO]: Obj %p, Type %X, Buf %p, ByteLen %X\n",
obj_desc, ACPI_GET_OBJECT_TYPE(obj_desc), buffer,
(u32) length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_read [FROM]: bit_len %X, bit_off %X, byte_off %X\n",
+ "FieldRead [FROM]: BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc->common_field.bit_length,
obj_desc->common_field.start_field_bit_offset,
obj_desc->common_field.base_byte_offset));
@@ -219,7 +220,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
u8 locked;
union acpi_operand_object *buffer_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_write_data_to_field", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
/* Parameter validation */
@@ -329,9 +330,10 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
if (length < required_length) {
+
/* We need to create a new buffer */
- new_buffer = ACPI_MEM_CALLOCATE(required_length);
+ new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
if (!new_buffer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -347,14 +349,14 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_write [FROM]: Obj %p (%s:%X), Buf %p, byte_len %X\n",
+ "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
source_desc,
acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
(source_desc)),
ACPI_GET_OBJECT_TYPE(source_desc), buffer, length));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "field_write [TO]: Obj %p (%s:%X), bit_len %X, bit_off %X, byte_off %X\n",
+ "FieldWrite [TO]: Obj %p (%s:%X), BitLen %X, BitOff %X, ByteOff %X\n",
obj_desc,
acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE(obj_desc)),
ACPI_GET_OBJECT_TYPE(obj_desc),
@@ -375,7 +377,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
/* Free temporary buffer if we used one */
if (new_buffer) {
- ACPI_MEM_FREE(new_buffer);
+ ACPI_FREE(new_buffer);
}
return_ACPI_STATUS(status);
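The write path above pads a short source into a zero-filled temporary of the field's required length (now obtained with ACPI_ALLOCATE_ZEROED) and frees that temporary once the write completes. A simplified sketch of the same pattern using plain libc calls rather than the ACPICA allocation macros:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int write_padded(const uint8_t *src, size_t src_len, size_t required_len,
                        int (*do_write)(const uint8_t *buf, size_t len))
{
        uint8_t *tmp;
        int ret;

        if (src_len >= required_len)            /* source already large enough */
                return do_write(src, src_len);

        tmp = calloc(1, required_len);          /* zero fill pads the tail */
        if (!tmp)
                return -1;
        memcpy(tmp, src, src_len);
        ret = do_write(tmp, required_len);
        free(tmp);                              /* temporary buffer, freed as above */
        return ret;
}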
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index bd1af35f7fc..051053f7ccc 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -87,7 +87,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
- ACPI_FUNCTION_TRACE_U32("ex_setup_region", field_datum_byte_offset);
+ ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
rgn_desc = obj_desc->common_field.region_obj;
@@ -112,7 +112,18 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
}
+ /* Exit if Address/Length have been disallowed by the host OS */
+
+ if (rgn_desc->common.flags & AOPOBJ_INVALID) {
+ return_ACPI_STATUS(AE_AML_ILLEGAL_ADDRESS);
+ }
+
+ /*
+ * Exit now for SMBus address space, it has a non-linear address space
+ * and the request cannot be directly validated
+ */
if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS) {
+
/* SMBus has a non-linear address space */
return_ACPI_STATUS(AE_OK);
@@ -134,10 +145,10 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* length of one field datum (access width) must fit within the region.
* (Region length is specified in bytes)
*/
- if (rgn_desc->region.length < (obj_desc->common_field.base_byte_offset +
- field_datum_byte_offset +
- obj_desc->common_field.
- access_byte_width)) {
+ if (rgn_desc->region.length <
+ (obj_desc->common_field.base_byte_offset +
+ field_datum_byte_offset +
+ obj_desc->common_field.access_byte_width)) {
if (acpi_gbl_enable_interpreter_slack) {
/*
* Slack mode only: We will go ahead and allow access to this
@@ -217,7 +228,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
union acpi_operand_object *rgn_desc;
acpi_physical_address address;
- ACPI_FUNCTION_TRACE("ex_access_region");
+ ACPI_FUNCTION_TRACE(ex_access_region);
/*
* Ensure that the region operands are fully evaluated and verify
@@ -246,7 +257,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
- " Region [%s:%X], Width %X, byte_base %X, Offset %X at %8.8X%8.8X\n",
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
@@ -352,7 +363,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
acpi_status status;
acpi_integer local_value;
- ACPI_FUNCTION_TRACE_U32("ex_field_datum_io", field_datum_byte_offset);
+ ACPI_FUNCTION_TRACE_U32(ex_field_datum_io, field_datum_byte_offset);
if (read_write == ACPI_READ) {
if (!value) {
@@ -487,10 +498,11 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "I/O to Data Register: value_ptr %p\n",
+ "I/O to Data Register: ValuePtr %p\n",
value));
if (read_write == ACPI_READ) {
+
/* Read the datum from the data_register */
status =
@@ -559,7 +571,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
acpi_integer merged_value;
acpi_integer current_value;
- ACPI_FUNCTION_TRACE_U32("ex_write_with_update_rule", mask);
+ ACPI_FUNCTION_TRACE_U32(ex_write_with_update_rule, mask);
/* Start with the new bits */
@@ -568,6 +580,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
/* If the mask is all ones, we don't need to worry about the update rule */
if (mask != ACPI_INTEGER_MAX) {
+
/* Decode the update rule */
switch (obj_desc->common_field.
@@ -614,7 +627,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
default:
ACPI_ERROR((AE_INFO,
- "Unknown update_rule value: %X",
+ "Unknown UpdateRule value: %X",
(obj_desc->common_field.
field_flags &
AML_FIELD_UPDATE_RULE_MASK)));
@@ -623,7 +636,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Mask %8.8X%8.8X, datum_offset %X, Width %X, Value %8.8X%8.8X, merged_value %8.8X%8.8X\n",
+ "Mask %8.8X%8.8X, DatumOffset %X, Width %X, Value %8.8X%8.8X, MergedValue %8.8X%8.8X\n",
ACPI_FORMAT_UINT64(mask),
field_datum_byte_offset,
obj_desc->common_field.access_byte_width,
@@ -666,7 +679,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
u32 field_datum_count;
u32 i;
- ACPI_FUNCTION_TRACE("ex_extract_from_field");
+ ACPI_FUNCTION_TRACE(ex_extract_from_field);
/* Validate target buffer and clear it */
@@ -704,6 +717,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
/* Read the rest of the field */
for (i = 1; i < field_datum_count; i++) {
+
/* Get next input datum from the field */
field_offset += obj_desc->common_field.access_byte_width;
@@ -771,6 +785,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
{
acpi_status status;
acpi_integer mask;
+ acpi_integer width_mask;
acpi_integer merged_datum;
acpi_integer raw_datum = 0;
u32 field_offset = 0;
@@ -780,7 +795,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
u32 field_datum_count;
u32 i;
- ACPI_FUNCTION_TRACE("ex_insert_into_field");
+ ACPI_FUNCTION_TRACE(ex_insert_into_field);
/* Validate input buffer */
@@ -795,15 +810,20 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Compute the number of datums (access width data items) */
+ width_mask =
+ ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
mask =
- ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
- datum_count =
- ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
- obj_desc->common_field.access_bit_width);
- field_datum_count =
- ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
- obj_desc->common_field.start_field_bit_offset,
- obj_desc->common_field.access_bit_width);
+ width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.
+ start_field_bit_offset);
+
+ datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
+ obj_desc->common_field.access_bit_width);
+
+ field_datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length +
+ obj_desc->common_field.
+ start_field_bit_offset,
+ obj_desc->common_field.
+ access_bit_width);
/* Get initial Datum from the input buffer */
@@ -817,6 +837,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Write the entire field */
for (i = 1; i < field_datum_count; i++) {
+
/* Write merged datum to the target field */
merged_datum &= mask;
@@ -833,7 +854,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
merged_datum = raw_datum >>
(obj_desc->common_field.access_bit_width -
obj_desc->common_field.start_field_bit_offset);
- mask = ACPI_INTEGER_MAX;
+ mask = width_mask;
if (i == datum_count) {
break;
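In acpi_ex_insert_into_field above, the per-datum mask is now bounded by a width_mask derived from the access width, so datums after the first never carry bits beyond the field's access width. A hedged sketch of how the masks combine; mask_low_bits() stands in for what ACPI_MASK_BITS_ABOVE is assumed to compute (all bits below the given position set):

#include <stdint.h>

static uint64_t mask_low_bits(unsigned int width)
{
        return (width >= 64) ? ~(uint64_t)0 : (((uint64_t)1 << width) - 1);
}

/* start_bit_offset is assumed to be < 64, as it is in practice. */
static uint64_t datum_mask(unsigned int access_bit_width,
                           unsigned int start_bit_offset, int first_datum)
{
        uint64_t width_mask = mask_low_bits(access_bit_width);

        if (first_datum)        /* first datum also skips the leading offset bits */
                return width_mask & ~(((uint64_t)1 << start_bit_offset) - 1);

        return width_mask;      /* later datums: full access width, nothing more */
}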
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index 48c18d29222..bd98aab017c 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -72,7 +72,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
union acpi_operand_object *reference_obj;
union acpi_operand_object *referenced_obj;
- ACPI_FUNCTION_TRACE_PTR("ex_get_object_reference", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);
*return_desc = NULL;
@@ -168,7 +168,7 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
acpi_size length1;
acpi_size new_length;
- ACPI_FUNCTION_TRACE("ex_concat_template");
+ ACPI_FUNCTION_TRACE(ex_concat_template);
/*
* Find the end_tag descriptor in each resource template.
@@ -250,7 +250,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
char *new_buf;
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_do_concatenate");
+ ACPI_FUNCTION_TRACE(ex_do_concatenate);
/*
* Convert the second operand if necessary. The first operand
@@ -445,10 +445,24 @@ acpi_ex_do_math_op(u16 opcode, acpi_integer integer0, acpi_integer integer1)
case AML_SHIFT_LEFT_OP: /* shift_left (Operand, shift_count, Result) */
+ /*
+ * We need to check if the shift count is greater than or equal to the integer
+ * bit width, since the behavior of such a shift is undefined in the C language.
+ */
+ if (integer1 >= acpi_gbl_integer_bit_width) {
+ return (0);
+ }
return (integer0 << integer1);
case AML_SHIFT_RIGHT_OP: /* shift_right (Operand, shift_count, Result) */
+ /*
+ * We need to check if the shift count is greater than or equal to the integer
+ * bit width, since the behavior of such a shift is undefined in the C language.
+ */
+ if (integer1 >= acpi_gbl_integer_bit_width) {
+ return (0);
+ }
return (integer0 >> integer1);
case AML_SUBTRACT_OP: /* Subtract (Integer0, Integer1, Result) */
@@ -489,7 +503,7 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
acpi_status status = AE_OK;
u8 local_result = FALSE;
- ACPI_FUNCTION_TRACE("ex_do_logical_numeric_op");
+ ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);
switch (opcode) {
case AML_LAND_OP: /* LAnd (Integer0, Integer1) */
@@ -557,7 +571,7 @@ acpi_ex_do_logical_op(u16 opcode,
u8 local_result = FALSE;
int compare;
- ACPI_FUNCTION_TRACE("ex_do_logical_op");
+ ACPI_FUNCTION_TRACE(ex_do_logical_op);
/*
* Convert the second operand if necessary. The first operand
@@ -649,6 +663,7 @@ acpi_ex_do_logical_op(u16 opcode,
/* Length and all bytes must be equal */
if ((length0 == length1) && (compare == 0)) {
+
/* Length and all bytes match ==> TRUE */
local_result = TRUE;
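The ShiftLeft/ShiftRight hunks above add a guard because shifting a C integer by a count greater than or equal to its width is undefined behavior; the interpreter instead defines the AML result as 0 in that case. A minimal sketch of the guarded operators:

#include <stdint.h>

static uint64_t aml_shift_left(uint64_t value, uint64_t count, unsigned int bit_width)
{
        if (count >= bit_width)
                return 0;               /* defined AML result, avoids C UB */
        return value << count;
}

static uint64_t aml_shift_right(uint64_t value, uint64_t count, unsigned int bit_width)
{
        if (count >= bit_width)
                return 0;
        return value >> count;
}

For example, aml_shift_left(1, 64, 64) returns 0 rather than depending on whatever the host CPU's shift instruction happens to produce.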
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index f843b22e20b..93098d68cad 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -61,7 +61,7 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* RETURN: None
*
- * DESCRIPTION: Remove a mutex from the "acquired_mutex" list
+ * DESCRIPTION: Remove a mutex from the "AcquiredMutex" list
*
******************************************************************************/
@@ -95,7 +95,7 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
*
* RETURN: None
*
- * DESCRIPTION: Add a mutex to the "acquired_mutex" list for this walk
+ * DESCRIPTION: Add a mutex to the "AcquiredMutex" list for this walk
*
******************************************************************************/
@@ -144,7 +144,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_acquire_mutex", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -165,7 +165,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
*/
if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot acquire Mutex [%4.4s], incorrect sync_level",
+ "Cannot acquire Mutex [%4.4s], incorrect SyncLevel",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
@@ -173,6 +173,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.owner_thread) {
+
/* Special case for Global Lock, allow all threads */
if ((obj_desc->mutex.owner_thread->thread_id ==
@@ -192,6 +193,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
if (ACPI_FAILURE(status)) {
+
/* Includes failure from a timeout on time_desc */
return_ACPI_STATUS(status);
@@ -232,7 +234,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_release_mutex");
+ ACPI_FUNCTION_TRACE(ex_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -277,7 +279,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*/
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
- "Cannot release Mutex [%4.4s], incorrect sync_level",
+ "Cannot release Mutex [%4.4s], incorrect SyncLevel",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
@@ -286,6 +288,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
obj_desc->mutex.acquisition_depth--;
if (obj_desc->mutex.acquisition_depth != 0) {
+
/* Just decrement the depth and return */
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index 054fe5e1a31..d3d70364626 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -77,7 +77,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
char *name_string;
u32 size_needed;
- ACPI_FUNCTION_TRACE("ex_allocate_name_string");
+ ACPI_FUNCTION_TRACE(ex_allocate_name_string);
/*
* Allow room for all \ and ^ prefixes, all segments and a multi_name_prefix.
@@ -85,6 +85,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
* This may actually be somewhat longer than needed.
*/
if (prefix_count == ACPI_UINT32_MAX) {
+
/* Special case for root */
size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
@@ -97,7 +98,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
* Allocate a buffer for the name.
* This buffer must be deleted by the caller!
*/
- name_string = ACPI_MEM_ALLOCATE(size_needed);
+ name_string = ACPI_ALLOCATE(size_needed);
if (!name_string) {
ACPI_ERROR((AE_INFO,
"Could not allocate size %d", size_needed));
@@ -119,11 +120,13 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Set up Dual or Multi prefixes if needed */
if (num_name_segs > 2) {
+
/* Set up multi prefixes */
*temp_ptr++ = AML_MULTI_NAME_PREFIX_OP;
*temp_ptr++ = (char)num_name_segs;
} else if (2 == num_name_segs) {
+
/* Set up dual prefixes */
*temp_ptr++ = AML_DUAL_NAME_PREFIX;
@@ -159,7 +162,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
u32 index;
char char_buf[5];
- ACPI_FUNCTION_TRACE("ex_name_segment");
+ ACPI_FUNCTION_TRACE(ex_name_segment);
/*
* If first character is a digit, then we know that we aren't looking at a
@@ -176,7 +179,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
for (index = 0;
(index < ACPI_NAME_SIZE)
- && (acpi_ut_valid_acpi_character(*aml_address)); index++) {
+ && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
}
@@ -184,6 +187,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
/* Valid name segment */
if (index == 4) {
+
/* Found 4 valid characters */
char_buf[4] = '\0';
@@ -249,11 +253,12 @@ acpi_ex_get_name_string(acpi_object_type data_type,
u32 prefix_count = 0;
u8 has_prefix = FALSE;
- ACPI_FUNCTION_TRACE_PTR("ex_get_name_string", aml_address);
+ ACPI_FUNCTION_TRACE_PTR(ex_get_name_string, aml_address);
if (ACPI_TYPE_LOCAL_REGION_FIELD == data_type ||
ACPI_TYPE_LOCAL_BANK_FIELD == data_type ||
ACPI_TYPE_LOCAL_INDEX_FIELD == data_type) {
+
/* Disallow prefixes for types associated with field_unit names */
name_string = acpi_ex_allocate_name_string(0, 1);
@@ -272,7 +277,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_ROOT_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "root_prefix(\\) at %p\n",
+ "RootPrefix(\\) at %p\n",
aml_address));
/*
@@ -290,7 +295,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
do {
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "parent_prefix (^) at %p\n",
+ "ParentPrefix (^) at %p\n",
aml_address));
aml_address++;
@@ -314,7 +319,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_DUAL_NAME_PREFIX:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "dual_name_prefix at %p\n",
+ "DualNamePrefix at %p\n",
aml_address));
aml_address++;
@@ -341,7 +346,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
case AML_MULTI_NAME_PREFIX_OP:
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
- "multi_name_prefix at %p\n",
+ "MultiNamePrefix at %p\n",
aml_address));
/* Fetch count of segments remaining in name path */
@@ -377,7 +382,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (prefix_count == ACPI_UINT32_MAX) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "name_seg is \"\\\" followed by NULL\n"));
+ "NameSeg is \"\\\" followed by NULL\n"));
}
/* Consume the NULL byte */
@@ -410,6 +415,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
}
if (AE_CTRL_PENDING == status && has_prefix) {
+
/* Ran out of segments after processing a prefix */
ACPI_ERROR((AE_INFO, "Malformed Name at %p", name_string));
@@ -418,7 +424,7 @@ acpi_ex_get_name_string(acpi_object_type data_type,
if (ACPI_FAILURE(status)) {
if (name_string) {
- ACPI_MEM_FREE(name_string);
+ ACPI_FREE(name_string);
}
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 23d0823bcd5..6374d8be88e 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -89,7 +89,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object *return_desc = NULL;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_0A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_0A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -150,7 +150,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
union acpi_operand_object **operand = &walk_state->operands[0];
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_0T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -216,7 +216,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
union acpi_operand_object **operand = &walk_state->operands[0];
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_1T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -264,7 +264,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
acpi_integer power_of_ten;
acpi_integer digit;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_1T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -322,8 +322,9 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
/* Since the bit position is one-based, subtract from 33 (65) */
- return_desc->integer.value = temp32 == 0 ? 0 :
- (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
+ return_desc->integer.value =
+ temp32 ==
+ 0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
break;
case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
@@ -342,6 +343,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
for (i = 0;
(i < acpi_gbl_integer_nybble_width) && (digit > 0);
i++) {
+
/* Get the least significant 4-bit BCD digit */
temp32 = ((u32) digit) & 0xF;
@@ -487,6 +489,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_DECIMAL);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -497,6 +500,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_string(operand[0], &return_desc,
ACPI_EXPLICIT_CONVERT_HEX);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -506,6 +510,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_buffer(operand[0], &return_desc);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -516,6 +521,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
status = acpi_ex_convert_to_integer(operand[0], &return_desc,
ACPI_ANY_BASE);
if (return_desc == operand[0]) {
+
/* No conversion performed, add ref to handle return value */
acpi_ut_add_reference(return_desc);
}
@@ -541,6 +547,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
}
if (ACPI_SUCCESS(status)) {
+
/* Store the return value computed above into the target object */
status = acpi_ex_store(return_desc, operand[1], walk_state);
@@ -548,16 +555,18 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
cleanup:
- if (!walk_state->result_obj) {
- walk_state->result_obj = return_desc;
- }
-
/* Delete return object on error */
if (ACPI_FAILURE(status)) {
acpi_ut_remove_reference(return_desc);
}
+ /* Save return object on success */
+
+ else if (!walk_state->result_obj) {
+ walk_state->result_obj = return_desc;
+ }
+
return_ACPI_STATUS(status);
}
@@ -582,7 +591,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
u32 type;
acpi_integer value;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_1A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_1A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the AML opcode */
@@ -625,6 +634,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
temp_desc = operand[0];
if (ACPI_GET_DESCRIPTOR_TYPE(temp_desc) ==
ACPI_DESC_TYPE_OPERAND) {
+
/* Internal reference object - prevent deletion */
acpi_ut_add_reference(temp_desc);
@@ -689,6 +699,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) {
goto cleanup;
}
+
/* Allocate a descriptor to hold the type. */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@@ -777,8 +788,25 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
/* Check for a method local or argument, or standalone String */
- if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) ==
ACPI_DESC_TYPE_NAMED) {
+ temp_desc =
+ acpi_ns_get_attached_object((struct
+ acpi_namespace_node *)
+ operand[0]);
+ if (temp_desc
+ &&
+ ((ACPI_GET_OBJECT_TYPE(temp_desc) ==
+ ACPI_TYPE_STRING)
+ || (ACPI_GET_OBJECT_TYPE(temp_desc) ==
+ ACPI_TYPE_LOCAL_REFERENCE))) {
+ operand[0] = temp_desc;
+ acpi_ut_add_reference(temp_desc);
+ } else {
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ } else {
switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
case ACPI_TYPE_LOCAL_REFERENCE:
/*
@@ -827,26 +855,35 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
break;
case ACPI_TYPE_STRING:
+ break;
+ default:
+ status = AE_AML_OPERAND_TYPE;
+ goto cleanup;
+ }
+ }
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(operand[0]) !=
+ ACPI_DESC_TYPE_NAMED) {
+ if (ACPI_GET_OBJECT_TYPE(operand[0]) ==
+ ACPI_TYPE_STRING) {
/*
* This is a deref_of (String). The string is a reference
* to a named ACPI object.
*
* 1) Find the owning Node
- * 2) Dereference the node to an actual object. Could be a
+ * 2) Dereference the node to an actual object. Could be a
* Field, so we need to resolve the node to a value.
*/
status =
- acpi_ns_get_node_by_path(operand[0]->string.
- pointer,
- walk_state->
- scope_info->scope.
- node,
- ACPI_NS_SEARCH_PARENT,
- ACPI_CAST_INDIRECT_PTR
- (struct
- acpi_namespace_node,
- &return_desc));
+ acpi_ns_get_node(walk_state->scope_info->
+ scope.node,
+ operand[0]->string.pointer,
+ ACPI_NS_SEARCH_PARENT,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &return_desc));
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -857,11 +894,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
(struct acpi_namespace_node, &return_desc),
walk_state);
goto cleanup;
-
- default:
-
- status = AE_AML_OPERAND_TYPE;
- goto cleanup;
}
}
@@ -937,13 +969,12 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_add_reference
(return_desc);
}
-
break;
default:
ACPI_ERROR((AE_INFO,
- "Unknown Index target_type %X in obj %p",
+ "Unknown Index TargetType %X in obj %p",
operand[0]->reference.
target_type, operand[0]));
status = AE_AML_OPERAND_TYPE;
@@ -957,7 +988,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
ACPI_DESC_TYPE_NAMED) {
-
return_desc =
acpi_ns_get_attached_object((struct
acpi_namespace_node
@@ -972,7 +1002,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default:
ACPI_ERROR((AE_INFO,
- "Unknown opcode in ref(%p) - %X",
+ "Unknown opcode in reference(%p) - %X",
operand[0],
operand[0]->reference.opcode));
@@ -998,6 +1028,11 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(return_desc);
}
- walk_state->result_obj = return_desc;
+ /* Save return object on success */
+
+ else {
+ walk_state->result_obj = return_desc;
+ }
+
return_ACPI_STATUS(status);
}
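
The reordered cleanup paths above (delete the return object on failure, publish it into the walk state only on success) follow one convention that is easier to see condensed; the sketch below simply restates it with the names used in the hunks:

	cleanup:
		if (ACPI_FAILURE(status)) {
			acpi_ut_remove_reference(return_desc);	/* drop on error */
		} else if (!walk_state->result_obj) {
			walk_state->result_obj = return_desc;	/* save on success */
		}
		return_ACPI_STATUS(status);
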
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index e263a5ddd40..7d2cbc11316 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -92,7 +92,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
u32 value;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_0T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Examine the opcode */
@@ -121,7 +121,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
#ifdef ACPI_GPE_NOTIFY_CHECK
/*
* GPE method wake/notify check. Here, we want to ensure that we
- * don't receive any "device_wake" Notifies from a GPE _Lxx or _Exx
+ * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
* GPE method during system runtime. If we do, the GPE is marked
* as "wake-only" and disabled.
*
@@ -138,6 +138,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
acpi_ev_check_for_wake_only_gpe(walk_state->
gpe_event_info);
if (ACPI_FAILURE(status)) {
+
/* AE_WAKE_ONLY_GPE only error, means ignore this notify */
return_ACPI_STATUS(AE_OK)
@@ -185,7 +186,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
union acpi_operand_object *return_desc2 = NULL;
acpi_status status;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_2T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_2T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
@@ -252,6 +253,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(return_desc2);
if (ACPI_FAILURE(status)) {
+
/* Delete the return object */
acpi_ut_remove_reference(return_desc1);
@@ -281,12 +283,13 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
acpi_size length;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_1T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Execute the opcode */
if (walk_state->op_info->flags & AML_MATH) {
+
/* All simple math opcodes (add, etc.) */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
@@ -383,54 +386,70 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
goto cleanup;
}
+ /* Initialize the Index reference object */
+
index = operand[1]->integer.value;
+ return_desc->reference.offset = (u32) index;
+ return_desc->reference.opcode = AML_INDEX_OP;
- /* At this point, the Source operand is a Package, Buffer, or String */
+ /*
+ * At this point, the Source operand is a String, Buffer, or Package.
+ * Verify that the index is within range.
+ */
+ switch (ACPI_GET_OBJECT_TYPE(operand[0])) {
+ case ACPI_TYPE_STRING:
- if (ACPI_GET_OBJECT_TYPE(operand[0]) == ACPI_TYPE_PACKAGE) {
- /* Object to be indexed is a Package */
+ if (index >= operand[0]->string.length) {
+ status = AE_AML_STRING_LIMIT;
+ }
+
+ return_desc->reference.target_type =
+ ACPI_TYPE_BUFFER_FIELD;
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ if (index >= operand[0]->buffer.length) {
+ status = AE_AML_BUFFER_LIMIT;
+ }
+
+ return_desc->reference.target_type =
+ ACPI_TYPE_BUFFER_FIELD;
+ break;
+
+ case ACPI_TYPE_PACKAGE:
if (index >= operand[0]->package.count) {
- ACPI_ERROR((AE_INFO,
- "Index value (%X%8.8X) beyond package end (%X)",
- ACPI_FORMAT_UINT64(index),
- operand[0]->package.count));
status = AE_AML_PACKAGE_LIMIT;
- goto cleanup;
}
return_desc->reference.target_type = ACPI_TYPE_PACKAGE;
- return_desc->reference.object = operand[0];
return_desc->reference.where =
&operand[0]->package.elements[index];
- } else {
- /* Object to be indexed is a Buffer/String */
+ break;
- if (index >= operand[0]->buffer.length) {
- ACPI_ERROR((AE_INFO,
- "Index value (%X%8.8X) beyond end of buffer (%X)",
- ACPI_FORMAT_UINT64(index),
- operand[0]->buffer.length));
- status = AE_AML_BUFFER_LIMIT;
- goto cleanup;
- }
+ default:
- return_desc->reference.target_type =
- ACPI_TYPE_BUFFER_FIELD;
- return_desc->reference.object = operand[0];
+ status = AE_AML_INTERNAL;
+ goto cleanup;
+ }
+
+ /* Failure means that the Index was beyond the end of the object */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Index (%X%8.8X) is beyond end of object",
+ ACPI_FORMAT_UINT64(index)));
+ goto cleanup;
}
/*
- * Add a reference to the target package/buffer/string for the life
- * of the index.
+ * Save the target object and add a reference to it for the life
+ * of the index
*/
+ return_desc->reference.object = operand[0];
acpi_ut_add_reference(operand[0]);
- /* Complete the Index reference object */
-
- return_desc->reference.opcode = AML_INDEX_OP;
- return_desc->reference.offset = (u32) index;
-
/* Store the reference to the Target */
status = acpi_ex_store(return_desc, operand[2], walk_state);
@@ -495,7 +514,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
acpi_status status = AE_OK;
u8 logical_result = FALSE;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_2A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
/* Create the internal return object */
@@ -509,6 +528,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
/* Execute the Opcode */
if (walk_state->op_info->flags & AML_LOGICAL_NUMERIC) {
+
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_numeric_op(walk_state->opcode,
@@ -518,6 +538,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
value, &logical_result);
goto store_logical_result;
} else if (walk_state->op_info->flags & AML_LOGICAL) {
+
/* logical_op (Operand0, Operand1) */
status = acpi_ex_do_logical_op(walk_state->opcode, operand[0],
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
index 6a3a883cb8a..e2d945dfd50 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/executer/exoparg3.c
@@ -88,20 +88,19 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
struct acpi_signal_fatal_info *fatal;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_3A_0T_0R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_0T_0R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
case AML_FATAL_OP: /* Fatal (fatal_type fatal_code fatal_arg) */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "fatal_op: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
+ "FatalOp: Type %X Code %X Arg %X <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n",
(u32) operand[0]->integer.value,
(u32) operand[1]->integer.value,
(u32) operand[2]->integer.value));
- fatal =
- ACPI_MEM_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
+ fatal = ACPI_ALLOCATE(sizeof(struct acpi_signal_fatal_info));
if (fatal) {
fatal->type = (u32) operand[0]->integer.value;
fatal->code = (u32) operand[1]->integer.value;
@@ -114,7 +113,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
/* Might return while OS is shutting down, just continue */
- ACPI_MEM_FREE(fatal);
+ ACPI_FREE(fatal);
break;
default:
@@ -151,7 +150,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
acpi_integer index;
acpi_size length;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_3A_1T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_3A_1T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
@@ -196,7 +195,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Always allocate a new buffer for the String */
- buffer = ACPI_MEM_CALLOCATE((acpi_size) length + 1);
+ buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -208,9 +207,10 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* If the requested length is zero, don't allocate a buffer */
if (length > 0) {
+
/* Allocate a new buffer for the Buffer */
- buffer = ACPI_MEM_CALLOCATE(length);
+ buffer = ACPI_ALLOCATE_ZEROED(length);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -225,6 +225,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
}
if (buffer) {
+
/* We have a buffer, copy the portion requested */
ACPI_MEMCPY(buffer, operand[0]->string.pointer + index,
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
index e043d924444..f0c0ba6eb40 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/executer/exoparg6.c
@@ -220,7 +220,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
acpi_integer index;
union acpi_operand_object *this_element;
- ACPI_FUNCTION_TRACE_STR("ex_opcode_6A_0T_1R",
+ ACPI_FUNCTION_TRACE_STR(ex_opcode_6A_0T_1R,
acpi_ps_get_opcode_name(walk_state->opcode));
switch (walk_state->opcode) {
@@ -276,6 +276,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
* match was found.
*/
for (; index < operand[0]->package.count; index++) {
+
/* Get the current package element */
this_element = operand[0]->package.elements[index];
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index 7719ae5d4f1..44d064f427b 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -97,7 +97,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
u32 minimum_accesses = 0xFFFFFFFF;
u32 accesses;
- ACPI_FUNCTION_TRACE("ex_generate_access");
+ ACPI_FUNCTION_TRACE(ex_generate_access);
/* Round Field start offset and length to "minimal" byte boundaries */
@@ -146,7 +146,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
accesses = field_end_offset - field_start_offset;
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "access_width %d end is within region\n",
+ "AccessWidth %d end is within region\n",
access_byte_width));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@@ -173,7 +173,7 @@ acpi_ex_generate_access(u32 field_bit_offset,
}
} else {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "access_width %d end is NOT within region\n",
+ "AccessWidth %d end is NOT within region\n",
access_byte_width));
if (access_byte_width == 1) {
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
@@ -228,7 +228,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
u32 byte_alignment;
u32 bit_length;
- ACPI_FUNCTION_TRACE("ex_decode_field_access");
+ ACPI_FUNCTION_TRACE(ex_decode_field_access);
access = (field_flags & AML_FIELD_ACCESS_TYPE_MASK);
@@ -322,7 +322,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
u32 byte_alignment;
u32 nearest_byte_address;
- ACPI_FUNCTION_TRACE("ex_prep_common_field_object");
+ ACPI_FUNCTION_TRACE(ex_prep_common_field_object);
/*
* Note: the structure being initialized is the
@@ -415,13 +415,13 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
u32 type;
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_prep_field_value");
+ ACPI_FUNCTION_TRACE(ex_prep_field_value);
/* Parameter validation */
if (info->field_type != ACPI_TYPE_LOCAL_INDEX_FIELD) {
if (!info->region_node) {
- ACPI_ERROR((AE_INFO, "Null region_node"));
+ ACPI_ERROR((AE_INFO, "Null RegionNode"));
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
@@ -467,7 +467,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->field.region_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "region_field: bit_off %X, Off %X, Gran %X, Region %p\n",
+ "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n",
obj_desc->field.start_field_bit_offset,
obj_desc->field.base_byte_offset,
obj_desc->field.access_byte_width,
@@ -488,7 +488,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->bank_field.bank_obj);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Bank Field: bit_off %X, Off %X, Gran %X, Region %p, bank_reg %p\n",
+ "Bank Field: BitOff %X, Off %X, Gran %X, Region %p, BankReg %p\n",
obj_desc->bank_field.start_field_bit_offset,
obj_desc->bank_field.base_byte_offset,
obj_desc->field.access_byte_width,
@@ -519,16 +519,29 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ut_add_reference(obj_desc->index_field.index_obj);
/*
+ * April 2006: Changed to match MS behavior
+ *
* The value written to the Index register is the byte offset of the
- * target field
- * Note: may change code to: ACPI_DIV_8 (Info->field_bit_position)
+ * target field in units of the granularity of the index_field
+ *
+ * Previously, the value was calculated as an index in terms of the
+ * width of the Data register, as below:
+ *
+ * obj_desc->index_field.Value = (u32)
+ * (Info->field_bit_position / ACPI_MUL_8 (
+ * obj_desc->Field.access_byte_width));
+ *
+ * February 2006: Tried value as a byte offset:
+ * obj_desc->index_field.Value = (u32)
+ * ACPI_DIV_8 (Info->field_bit_position);
*/
- obj_desc->index_field.value = (u32)
- (info->field_bit_position /
- ACPI_MUL_8(obj_desc->field.access_byte_width));
+ obj_desc->index_field.value =
+ (u32) ACPI_ROUND_DOWN(ACPI_DIV_8(info->field_bit_position),
+ obj_desc->index_field.
+ access_byte_width);
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "index_field: bit_off %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
+ "IndexField: BitOff %X, Off %X, Value %X, Gran %X, Index %p, Data %p\n",
obj_desc->index_field.start_field_bit_offset,
obj_desc->index_field.base_byte_offset,
obj_desc->index_field.value,
@@ -550,7 +563,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
acpi_ns_get_type(info->field_node));
ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
- "Set named_obj %p [%4.4s], obj_desc %p\n",
+ "Set NamedObj %p [%4.4s], ObjDesc %p\n",
info->field_node,
acpi_ut_get_node_name(info->field_node), obj_desc));
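
A worked example of the revised index_field Value calculation above, with hypothetical numbers chosen only to show the difference from the previous formula: take a field at bit position 48 behind a 2-byte-wide Index/Data register pair.

	/* field_bit_position = 48, access_byte_width = 2 */
	old = 48 / (8 * 2);			/* == 3, index in Data-register units */
	new = ACPI_ROUND_DOWN(48 / 8, 2);	/* == 6, byte offset rounded down     */
	/* ACPI_ROUND_DOWN(v, b) masks off the low bits: (v) & ~((b) - 1)          */
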
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 6a4cfdff606..3cc97ba48b3 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -81,7 +81,7 @@ acpi_ex_system_memory_space_handler(u32 function,
u32 remainder;
#endif
- ACPI_FUNCTION_TRACE("ex_system_memory_space_handler");
+ ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);
/* Validate and translate the bit width */
@@ -103,7 +103,7 @@ acpi_ex_system_memory_space_handler(u32 function,
break;
default:
- ACPI_ERROR((AE_INFO, "Invalid system_memory width %d",
+ ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d",
bit_width));
return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
}
@@ -135,6 +135,7 @@ acpi_ex_system_memory_space_handler(u32 function,
* Delete the existing mapping and create a new one.
*/
if (mem_info->mapped_length) {
+
/* Valid mapping, delete it */
acpi_os_unmap_memory(mem_info->mapped_logical_address,
@@ -181,8 +182,8 @@ acpi_ex_system_memory_space_handler(u32 function,
(acpi_integer) mem_info->mapped_physical_address);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "system_memory %d (%d width) Address=%8.8X%8.8X\n",
- function, bit_width, ACPI_FORMAT_UINT64(address)));
+ "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
@@ -283,11 +284,11 @@ acpi_ex_system_io_space_handler(u32 function,
acpi_status status = AE_OK;
u32 value32;
- ACPI_FUNCTION_TRACE("ex_system_io_space_handler");
+ ACPI_FUNCTION_TRACE(ex_system_io_space_handler);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "system_iO %d (%d width) Address=%8.8X%8.8X\n",
- function, bit_width, ACPI_FORMAT_UINT64(address)));
+ "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
@@ -342,7 +343,7 @@ acpi_ex_pci_config_space_handler(u32 function,
struct acpi_pci_id *pci_id;
u16 pci_register;
- ACPI_FUNCTION_TRACE("ex_pci_config_space_handler");
+ ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
/*
* The arguments to acpi_os(Read|Write)pci_configuration are:
@@ -360,7 +361,7 @@ acpi_ex_pci_config_space_handler(u32 function,
pci_register = (u16) (u32) address;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "pci_config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
+ "Pci-Config %d (%d) Seg(%04x) Bus(%04x) Dev(%04x) Func(%04x) Reg(%04x)\n",
function, bit_width, pci_id->segment, pci_id->bus,
pci_id->device, pci_id->function, pci_register));
@@ -414,7 +415,7 @@ acpi_ex_cmos_space_handler(u32 function,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_cmos_space_handler");
+ ACPI_FUNCTION_TRACE(ex_cmos_space_handler);
return_ACPI_STATUS(status);
}
@@ -446,7 +447,7 @@ acpi_ex_pci_bar_space_handler(u32 function,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_pci_bar_space_handler");
+ ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);
return_ACPI_STATUS(status);
}
@@ -476,23 +477,16 @@ acpi_ex_data_table_space_handler(u32 function,
acpi_integer * value,
void *handler_context, void *region_context)
{
- acpi_status status = AE_OK;
- u32 byte_width = ACPI_DIV_8(bit_width);
- u32 i;
- char *logical_addr_ptr;
-
- ACPI_FUNCTION_TRACE("ex_data_table_space_handler");
-
- logical_addr_ptr = ACPI_PHYSADDR_TO_PTR(address);
+ ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
/* Perform the memory read or write */
switch (function) {
case ACPI_READ:
- for (i = 0; i < byte_width; i++) {
- ((char *)value)[i] = logical_addr_ptr[i];
- }
+ ACPI_MEMCPY(ACPI_CAST_PTR(char, value),
+ ACPI_PHYSADDR_TO_PTR(address),
+ ACPI_DIV_8(bit_width));
break;
case ACPI_WRITE:
@@ -501,5 +495,5 @@ acpi_ex_data_table_space_handler(u32 function,
return_ACPI_STATUS(AE_SUPPORT);
}
- return_ACPI_STATUS(status);
+ return_ACPI_STATUS(AE_OK);
}
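
A self-contained sketch of what the rewritten ACPI_READ branch above reduces to, using standard C stand-ins for the ACPI_MEMCPY/ACPI_PHYSADDR_TO_PTR/ACPI_DIV_8 wrappers (the function name is illustrative):

	#include <stdint.h>
	#include <string.h>

	static void data_table_read(void *value, const void *mapped_table,
				    uint32_t bit_width)
	{
		memcpy(value, mapped_table, bit_width / 8);	/* whole access in one copy */
	}
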
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 01b26c80d22..3089b05a136 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -87,7 +87,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
struct acpi_namespace_node *node;
acpi_object_type entry_type;
- ACPI_FUNCTION_TRACE("ex_resolve_node_to_value");
+ ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);
/*
* The stack pointer points to a struct acpi_namespace_node (Node). Get the
@@ -97,12 +97,13 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
source_desc = acpi_ns_get_attached_object(node);
entry_type = acpi_ns_get_type((acpi_handle) node);
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p source_desc=%p [%s]\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
node, source_desc,
acpi_ut_get_type_name(entry_type)));
if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
(entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {
+
/* There is always exactly one level of indirection */
node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
@@ -113,10 +114,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/*
* Several object types require no further processing:
- * 1) Devices rarely have an attached object, return the Node
+ * 1) Device/Thermal objects don't have a "real" subobject, return the Node
* 2) Method locals and arguments have a pseudo-Node
*/
- if (entry_type == ACPI_TYPE_DEVICE ||
+ if ((entry_type == ACPI_TYPE_DEVICE) ||
+ (entry_type == ACPI_TYPE_THERMAL) ||
(node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
return_ACPI_STATUS(AE_OK);
}
@@ -141,6 +143,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
status = acpi_ds_get_package_arguments(source_desc);
if (ACPI_SUCCESS(status)) {
+
/* Return an additional reference to the object */
obj_desc = source_desc;
@@ -158,6 +161,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
status = acpi_ds_get_buffer_arguments(source_desc);
if (ACPI_SUCCESS(status)) {
+
/* Return an additional reference to the object */
obj_desc = source_desc;
@@ -199,7 +203,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "field_read Node=%p source_desc=%p Type=%X\n",
+ "FieldRead Node=%p SourceDesc=%p Type=%X\n",
node, source_desc, entry_type));
status =
@@ -213,7 +217,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
@@ -240,6 +243,8 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* This is a ddb_handle */
/* Return an additional reference to the object */
+ case AML_REF_OF_OP:
+
obj_desc = source_desc;
acpi_ut_add_reference(obj_desc);
break;
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index 1deed492fe8..6499de87801 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -78,7 +78,7 @@ acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
{
acpi_status status;
- ACPI_FUNCTION_TRACE_PTR("ex_resolve_to_value", stack_ptr);
+ ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);
if (!stack_ptr || !*stack_ptr) {
ACPI_ERROR((AE_INFO, "Internal - null pointer"));
@@ -144,7 +144,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
union acpi_operand_object *obj_desc;
u16 opcode;
- ACPI_FUNCTION_TRACE("ex_resolve_object_to_value");
+ ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
stack_desc = *stack_ptr;
@@ -190,7 +190,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Arg/Local %X] value_obj is %p\n",
+ "[Arg/Local %X] ValueObj is %p\n",
stack_desc->reference.offset,
obj_desc));
@@ -239,7 +239,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
/* Invalid reference object */
ACPI_ERROR((AE_INFO,
- "Unknown target_type %X in Index/Reference obj %p",
+ "Unknown TargetType %X in Index/Reference obj %p",
stack_desc->reference.target_type,
stack_desc));
status = AE_AML_INTERNAL;
@@ -257,10 +257,24 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case AML_INT_NAMEPATH_OP: /* Reference to a named object */
- /* Get the object pointed to by the namespace node */
+ /* Dereference the name */
+
+ if ((stack_desc->reference.node->type ==
+ ACPI_TYPE_DEVICE)
+ || (stack_desc->reference.node->type ==
+ ACPI_TYPE_THERMAL)) {
+
+ /* These node types do not have 'real' subobjects */
+
+ *stack_ptr = (void *)stack_desc->reference.node;
+ } else {
+ /* Get the object pointed to by the namespace node */
+
+ *stack_ptr =
+ (stack_desc->reference.node)->object;
+ acpi_ut_add_reference(*stack_ptr);
+ }
- *stack_ptr = (stack_desc->reference.node)->object;
- acpi_ut_add_reference(*stack_ptr);
acpi_ut_remove_reference(stack_desc);
break;
@@ -293,7 +307,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case ACPI_TYPE_LOCAL_INDEX_FIELD:
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "field_read source_desc=%p Type=%X\n",
+ "FieldRead SourceDesc=%p Type=%X\n",
stack_desc,
ACPI_GET_OBJECT_TYPE(stack_desc)));
@@ -337,7 +351,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
acpi_object_type type;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_ex_resolve_multiple");
+ ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);
/* Operand can be either a namespace node or an operand descriptor */
@@ -382,10 +396,16 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
switch (obj_desc->reference.opcode) {
case AML_REF_OF_OP:
+ case AML_INT_NAMEPATH_OP:
/* Dereference the reference pointer */
- node = obj_desc->reference.object;
+ if (obj_desc->reference.opcode == AML_REF_OF_OP) {
+ node = obj_desc->reference.object;
+ } else { /* AML_INT_NAMEPATH_OP */
+
+ node = obj_desc->reference.node;
+ }
/* All "References" point to a NS node */
@@ -401,6 +421,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
+
/* No object, use the NS node type */
type = acpi_ns_get_type(node);
@@ -432,6 +453,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
*/
obj_desc = *(obj_desc->reference.where);
if (!obj_desc) {
+
/* NULL package elements are allowed */
type = 0; /* Uninitialized */
@@ -439,39 +461,6 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
}
break;
- case AML_INT_NAMEPATH_OP:
-
- /* Dereference the reference pointer */
-
- node = obj_desc->reference.node;
-
- /* All "References" point to a NS node */
-
- if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
- ACPI_DESC_TYPE_NAMED) {
- ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]",
- node,
- acpi_ut_get_descriptor_name(node)));
- return_ACPI_STATUS(AE_AML_INTERNAL);
- }
-
- /* Get the attached object */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
- /* No object, use the NS node type */
-
- type = acpi_ns_get_type(node);
- goto exit;
- }
-
- /* Check for circular references */
-
- if (obj_desc == operand) {
- return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
- }
- break;
-
case AML_LOCAL_OP:
case AML_ARG_OP:
@@ -513,7 +502,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
case AML_DEBUG_OP:
- /* The Debug Object is of type "debug_object" */
+ /* The Debug Object is of type "DebugObject" */
type = ACPI_TYPE_DEBUG_OBJECT;
goto exit;
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index a1c000f5a41..4c93d097233 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -77,6 +77,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
ACPI_FUNCTION_ENTRY();
if (type_needed == ACPI_TYPE_ANY) {
+
/* All types OK, so we don't perform any typechecks */
return (AE_OK);
@@ -143,7 +144,7 @@ acpi_ex_resolve_operands(u16 opcode,
acpi_object_type type_needed;
u16 target_op = 0;
- ACPI_FUNCTION_TRACE_U32("ex_resolve_operands", opcode);
+ ACPI_FUNCTION_TRACE_U32(ex_resolve_operands, opcode);
op_info = acpi_ps_get_opcode_info(opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
@@ -158,7 +159,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Opcode %X [%s] required_operand_types=%8.8X\n",
+ "Opcode %X [%s] RequiredOperandTypes=%8.8X\n",
opcode, op_info->name, arg_types));
/*
@@ -224,6 +225,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (object_type == (u8) ACPI_TYPE_LOCAL_REFERENCE) {
+
/* Decode the Reference */
op_info = acpi_ps_get_opcode_info(opcode);
@@ -247,7 +249,7 @@ acpi_ex_resolve_operands(u16 opcode,
ACPI_DEBUG_ONLY_MEMBERS(ACPI_DEBUG_PRINT
((ACPI_DB_EXEC,
- "Operand is a Reference, ref_opcode [%s]\n",
+ "Operand is a Reference, RefOpcode [%s]\n",
(acpi_ps_get_opcode_info
(obj_desc->
reference.
@@ -332,6 +334,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (obj_desc->reference.opcode == AML_NAME_OP) {
+
/* Convert a named reference to the actual named object */
temp_node = obj_desc->reference.object;
@@ -623,7 +626,7 @@ acpi_ex_resolve_operands(u16 opcode,
default:
ACPI_ERROR((AE_INFO,
- "Needed [Region/region_field], found [%s] %p",
+ "Needed [Region/RegionField], found [%s] %p",
acpi_ut_get_object_type_name
(obj_desc), obj_desc));
@@ -662,6 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
}
if (target_op == AML_DEBUG_OP) {
+
/* Allow store of any object to the Debug object */
break;
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 3f020c0e2b9..0456405ba01 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -82,7 +82,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
{
u32 i;
- ACPI_FUNCTION_TRACE_PTR("ex_do_debug_object", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
level, " "));
@@ -245,7 +245,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
acpi_status status = AE_OK;
union acpi_operand_object *ref_desc = dest_desc;
- ACPI_FUNCTION_TRACE_PTR("ex_store", dest_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store, dest_desc);
/* Validate parameters */
@@ -297,7 +297,7 @@ acpi_ex_store(union acpi_operand_object *source_desc,
ACPI_DUMP_STACK_ENTRY(source_desc);
ACPI_DUMP_STACK_ENTRY(dest_desc);
- ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ex_store",
+ ACPI_DUMP_OPERANDS(&dest_desc, ACPI_IMODE_EXECUTE, "ExStore",
2,
"Target is not a Reference or Constant object");
@@ -396,7 +396,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
u8 value = 0;
u32 i;
- ACPI_FUNCTION_TRACE("ex_store_object_to_index");
+ ACPI_FUNCTION_TRACE(ex_store_object_to_index);
/*
* Destination must be a reference pointer, and
@@ -423,6 +423,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
}
if (obj_desc) {
+
/* Decrement reference count by the ref count of the parent package */
for (i = 0; i < ((union acpi_operand_object *)
@@ -502,8 +503,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
break;
default:
- ACPI_ERROR((AE_INFO,
- "Target is not a Package or buffer_field"));
+ ACPI_ERROR((AE_INFO, "Target is not a Package or BufferField"));
status = AE_AML_OPERAND_TYPE;
break;
}
@@ -548,7 +548,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
union acpi_operand_object *new_desc;
acpi_object_type target_type;
- ACPI_FUNCTION_TRACE_PTR("ex_store_object_to_node", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_node, source_desc);
/* Get current type of the node, and object attached to Node */
@@ -572,6 +572,7 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
/* If no implicit conversion, drop into the default case below */
if ((!implicit_conversion) || (walk_state->opcode == AML_COPY_OP)) {
+
/* Force execution of default (no implicit conversion) */
target_type = ACPI_TYPE_ANY;
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index 42967baf760..591aaf0e18b 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -72,7 +72,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
union acpi_operand_object *source_desc = *source_desc_ptr;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_resolve_object");
+ ACPI_FUNCTION_TRACE(ex_resolve_object);
/* Ensure we have a Target that can be stored to */
@@ -97,6 +97,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
*/
if (ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE) {
+
/* Resolve a reference object first */
status =
@@ -121,6 +122,7 @@ acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
!((ACPI_GET_OBJECT_TYPE(source_desc) ==
ACPI_TYPE_LOCAL_REFERENCE)
&& (source_desc->reference.opcode == AML_LOAD_OP))) {
+
/* Conversion successful but still not a valid type */
ACPI_ERROR((AE_INFO,
@@ -199,7 +201,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
union acpi_operand_object *actual_src_desc;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ex_store_object_to_object", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
actual_src_desc = source_desc;
if (!dest_desc) {
@@ -289,6 +291,7 @@ acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
}
if (actual_src_desc != source_desc) {
+
/* Delete the intermediate (temporary) source object */
acpi_ut_remove_reference(actual_src_desc);
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
index 6ab70708775..99ebe5adfcd 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/executer/exstorob.c
@@ -67,7 +67,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
u32 length;
u8 *buffer;
- ACPI_FUNCTION_TRACE_PTR("ex_store_buffer_to_buffer", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
/* We know that source_desc is a buffer by now */
@@ -80,7 +80,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
*/
if ((target_desc->buffer.length == 0) ||
(target_desc->common.flags & AOPOBJ_STATIC_POINTER)) {
- target_desc->buffer.pointer = ACPI_MEM_ALLOCATE(length);
+ target_desc->buffer.pointer = ACPI_ALLOCATE(length);
if (!target_desc->buffer.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -91,6 +91,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
/* Copy source buffer to target buffer */
if (length <= target_desc->buffer.length) {
+
/* Clear existing buffer and copy in the new one */
ACPI_MEMSET(target_desc->buffer.pointer, 0,
@@ -102,7 +103,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
* truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the de facto
- * standard. ACPi 3.0_a changes this behavior such that the buffer
+ * standard. ACPI 3.0_a changes this behavior such that the buffer
* is no longer truncated.
*/
@@ -113,6 +114,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* copy must not truncate the original buffer.
*/
if (original_src_type == ACPI_TYPE_STRING) {
+
/* Set the new length of the target */
target_desc->buffer.length = length;
@@ -156,7 +158,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
u32 length;
u8 *buffer;
- ACPI_FUNCTION_TRACE_PTR("ex_store_string_to_string", source_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
/* We know that source_desc is a string by now */
@@ -183,13 +185,14 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
*/
if (target_desc->string.pointer &&
(!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) {
+
/* Only free if not a pointer into the DSDT */
- ACPI_MEM_FREE(target_desc->string.pointer);
+ ACPI_FREE(target_desc->string.pointer);
}
- target_desc->string.pointer = ACPI_MEM_CALLOCATE((acpi_size)
- length + 1);
+ target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size)
+ length + 1);
if (!target_desc->string.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index ea9144f42e1..52beee3674a 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -68,7 +68,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
acpi_status status;
acpi_status status2;
- ACPI_FUNCTION_TRACE("ex_system_wait_semaphore");
+ ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
status = acpi_os_wait_semaphore(semaphore, 1, 0);
if (ACPI_SUCCESS(status)) {
@@ -76,6 +76,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
}
if (status == AE_TIME) {
+
/* We must wait, so unlock the interpreter */
acpi_ex_exit_interpreter();
@@ -90,6 +91,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout)
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
+
/* Report fatal error, could not acquire interpreter */
return_ACPI_STATUS(status2);
@@ -191,7 +193,7 @@ acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ex_system_acquire_mutex", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -229,7 +231,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_system_release_mutex");
+ ACPI_FUNCTION_TRACE(ex_system_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -263,7 +265,7 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_system_signal_event");
+ ACPI_FUNCTION_TRACE(ex_system_signal_event);
if (obj_desc) {
status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1);
@@ -293,7 +295,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ex_system_wait_event");
+ ACPI_FUNCTION_TRACE(ex_system_wait_event);
if (obj_desc) {
status =
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index f73a61aeb7e..982c8b65876 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -87,9 +87,9 @@ acpi_status acpi_ex_enter_interpreter(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_enter_interpreter");
+ ACPI_FUNCTION_TRACE(ex_enter_interpreter);
- status = acpi_ut_acquire_mutex(ACPI_MTX_EXECUTE);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
}
@@ -123,9 +123,9 @@ void acpi_ex_exit_interpreter(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_exit_interpreter");
+ ACPI_FUNCTION_TRACE(ex_exit_interpreter);
- status = acpi_ut_release_mutex(ACPI_MTX_EXECUTE);
+ status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
}
@@ -189,11 +189,12 @@ u8 acpi_ex_acquire_global_lock(u32 field_flags)
u8 locked = FALSE;
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_acquire_global_lock");
+ ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
/* Only attempt lock if the always_lock bit is set */
if (field_flags & AML_FIELD_LOCK_RULE_MASK) {
+
/* We should attempt to get the lock, wait forever */
status = acpi_ev_acquire_global_lock(ACPI_WAIT_FOREVER);
@@ -225,15 +226,17 @@ void acpi_ex_release_global_lock(u8 locked_by_me)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ex_release_global_lock");
+ ACPI_FUNCTION_TRACE(ex_release_global_lock);
/* Only attempt unlock if the caller locked it */
if (locked_by_me) {
+
/* OK, now release the lock */
status = acpi_ev_release_global_lock();
if (ACPI_FAILURE(status)) {
+
/* Report the error, but there isn't much else we can do */
ACPI_EXCEPTION((AE_INFO, status,
@@ -263,7 +266,7 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base)
u32 num_digits;
acpi_integer current_value;
- ACPI_FUNCTION_TRACE("ex_digits_needed");
+ ACPI_FUNCTION_TRACE(ex_digits_needed);
/* acpi_integer is unsigned, so we don't worry about a '-' prefix */
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index e8165c4f162..1cd25784b7a 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -48,6 +48,8 @@ MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
static int acpi_fan_remove(struct acpi_device *device, int type);
+static int acpi_fan_suspend(struct acpi_device *device, int state);
+static int acpi_fan_resume(struct acpi_device *device, int state);
static struct acpi_driver acpi_fan_driver = {
.name = ACPI_FAN_DRIVER_NAME,
@@ -56,6 +58,8 @@ static struct acpi_driver acpi_fan_driver = {
.ops = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
+ .suspend = acpi_fan_suspend,
+ .resume = acpi_fan_resume,
},
};
@@ -206,6 +210,10 @@ static int acpi_fan_add(struct acpi_device *device)
goto end;
}
+ device->flags.force_power_state = 1;
+ acpi_bus_set_power(device->handle, state);
+ device->flags.force_power_state = 0;
+
result = acpi_fan_add_fs(device);
if (result)
goto end;
@@ -239,6 +247,38 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
+static int acpi_fan_suspend(struct acpi_device *device, int state)
+{
+ if (!device)
+ return -EINVAL;
+
+ acpi_bus_set_power(device->handle, ACPI_STATE_D0);
+
+ return AE_OK;
+}
+
+static int acpi_fan_resume(struct acpi_device *device, int state)
+{
+ int result = 0;
+ int power_state = 0;
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_bus_get_power(device->handle, &power_state);
+ if (result) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error reading fan power state\n"));
+ return result;
+ }
+
+ device->flags.force_power_state = 1;
+ acpi_bus_set_power(device->handle, power_state);
+ device->flags.force_power_state = 0;
+
+ return result;
+}
+
static int __init acpi_fan_init(void)
{
int result = 0;
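
The force_power_state sequence used in acpi_fan_add() and acpi_fan_resume() above relies on an assumed property of acpi_bus_set_power(): with the flag set, the _PSx method is evaluated even when the cached device state already matches, so the fan really gets re-programmed. The sketch below only annotates that intent; it is not additional code.

	device->flags.force_power_state = 1;	/* bypass the "already in this state" shortcut (assumed) */
	acpi_bus_set_power(device->handle, power_state);
	device->flags.force_power_state = 0;
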
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index ea2f13271ff..de50fab2a91 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -63,7 +63,7 @@ acpi_status acpi_hw_initialize(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_initialize");
+ ACPI_FUNCTION_TRACE(hw_initialize);
/* We must have the ACPI tables by the time we get here */
@@ -100,7 +100,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
acpi_status status;
u32 retry;
- ACPI_FUNCTION_TRACE("hw_set_mode");
+ ACPI_FUNCTION_TRACE(hw_set_mode);
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
@@ -198,7 +198,7 @@ u32 acpi_hw_get_mode(void)
acpi_status status;
u32 value;
- ACPI_FUNCTION_TRACE("hw_get_mode");
+ ACPI_FUNCTION_TRACE(hw_get_mode);
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index d84942d22dd..608a3a60ee1 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -214,6 +214,7 @@ acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Disable all GPEs in this register */
status = acpi_hw_low_level_write(8, 0x00,
@@ -250,6 +251,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info * gpe_xrupt_info,
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
+
/* Clear status on all GPEs in this register */
status = acpi_hw_low_level_write(8, 0xFF,
@@ -368,7 +370,7 @@ acpi_status acpi_hw_disable_all_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_disable_all_gpes");
+ ACPI_FUNCTION_TRACE(hw_disable_all_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
@@ -391,7 +393,7 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_enable_all_runtime_gpes");
+ ACPI_FUNCTION_TRACE(hw_enable_all_runtime_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_runtime_gpe_block);
return_ACPI_STATUS(status);
@@ -413,7 +415,7 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("hw_enable_all_wakeup_gpes");
+ ACPI_FUNCTION_TRACE(hw_enable_all_wakeup_gpes);
status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block);
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index e1fe7549841..ae142de1950 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -43,8 +43,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acevents.h>
@@ -63,23 +61,21 @@ ACPI_MODULE_NAME("hwregs")
* DESCRIPTION: Clears all fixed and general purpose status bits
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_hw_clear_acpi_status(u32 flags)
{
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
- ACPI_FUNCTION_TRACE("hw_clear_acpi_status");
+ ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
ACPI_BITMASK_ALL_FIXED_STATUS,
(u16) acpi_gbl_FADT->xpm1a_evt_blk.address));
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS,
@@ -104,9 +100,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
unlock_and_exit:
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
@@ -129,10 +123,9 @@ acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
{
acpi_status status = AE_OK;
- struct acpi_parameter_info info;
- char *sleep_state_name;
+ struct acpi_evaluate_info *info;
- ACPI_FUNCTION_TRACE("acpi_get_sleep_type_data");
+ ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
/* Validate parameters */
@@ -140,34 +133,39 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Evaluate the namespace object containing the values for this state */
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
- info.parameters = NULL;
- info.return_object = NULL;
- sleep_state_name =
+ info->pathname =
ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
- status = acpi_ns_evaluate_by_name(sleep_state_name, &info);
+ /* Evaluate the namespace object containing the values for this state */
+
+ status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "%s while evaluating sleep_state [%s]\n",
+ "%s while evaluating SleepState [%s]\n",
acpi_format_exception(status),
- sleep_state_name));
+ info->pathname));
- return_ACPI_STATUS(status);
+ goto cleanup;
}
/* Must have a return object */
- if (!info.return_object) {
+ if (!info->return_object) {
ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
- sleep_state_name));
+ info->pathname));
status = AE_NOT_EXIST;
}
/* It must be of type Package */
- else if (ACPI_GET_OBJECT_TYPE(info.return_object) != ACPI_TYPE_PACKAGE) {
+ else if (ACPI_GET_OBJECT_TYPE(info->return_object) != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
"Sleep State return object is not a Package"));
status = AE_AML_OPERAND_TYPE;
@@ -180,7 +178,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
* by BIOS vendors seems to be to have 2 or more elements, at least
* one per sleep type (A/B).
*/
- else if (info.return_object->package.count < 2) {
+ else if (info->return_object->package.count < 2) {
ACPI_ERROR((AE_INFO,
"Sleep State return package does not have at least two elements"));
status = AE_AML_NO_OPERAND;
@@ -188,39 +186,42 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b)
/* The first two elements must both be of type Integer */
- else if ((ACPI_GET_OBJECT_TYPE(info.return_object->package.elements[0])
+ else if ((ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[0])
!= ACPI_TYPE_INTEGER) ||
- (ACPI_GET_OBJECT_TYPE(info.return_object->package.elements[1])
+ (ACPI_GET_OBJECT_TYPE(info->return_object->package.elements[1])
!= ACPI_TYPE_INTEGER)) {
ACPI_ERROR((AE_INFO,
"Sleep State return package elements are not both Integers (%s, %s)",
- acpi_ut_get_object_type_name(info.return_object->
+ acpi_ut_get_object_type_name(info->return_object->
package.elements[0]),
- acpi_ut_get_object_type_name(info.return_object->
+ acpi_ut_get_object_type_name(info->return_object->
package.elements[1])));
status = AE_AML_OPERAND_TYPE;
} else {
/* Valid _Sx_ package size, type, and value */
*sleep_type_a = (u8)
- (info.return_object->package.elements[0])->integer.value;
+ (info->return_object->package.elements[0])->integer.value;
*sleep_type_b = (u8)
- (info.return_object->package.elements[1])->integer.value;
+ (info->return_object->package.elements[1])->integer.value;
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
- "While evaluating sleep_state [%s], bad Sleep object %p type %s",
- sleep_state_name, info.return_object,
- acpi_ut_get_object_type_name(info.
+ "While evaluating SleepState [%s], bad Sleep object %p type %s",
+ info->pathname, info->return_object,
+ acpi_ut_get_object_type_name(info->
return_object)));
}
- acpi_ut_remove_reference(info.return_object);
+ acpi_ut_remove_reference(info->return_object);
+
+ cleanup:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_sleep_type_data);
+ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
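A minimal caller sketch for the converted interface; the helper now allocates and frees its acpi_evaluate_info block internally, so callers only supply the two output bytes (S3 is used here purely as an example):

    u8 type_a, type_b;
    acpi_status status;

    status = acpi_get_sleep_type_data(ACPI_STATE_S3, &type_a, &type_b);
    if (ACPI_FAILURE(status))
            printk(KERN_ERR "PM: no usable _S3 package (%s)\n",
                   acpi_format_exception(status));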
/*******************************************************************************
*
@@ -233,13 +234,12 @@ EXPORT_SYMBOL(acpi_get_sleep_type_data);
* DESCRIPTION: Map register_id into a register bitmask.
*
******************************************************************************/
-
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
{
ACPI_FUNCTION_ENTRY();
if (register_id > ACPI_BITREG_MAX) {
- ACPI_ERROR((AE_INFO, "Invalid bit_register ID: %X",
+ ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X",
register_id));
return (NULL);
}
@@ -260,6 +260,8 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
*
* DESCRIPTION: ACPI bit_register read function.
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
@@ -268,7 +270,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_get_register");
+ ACPI_FUNCTION_TRACE(acpi_get_register);
/* Get the info structure corresponding to the requested ACPI Register */
@@ -277,24 +279,14 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
/* Read from the register */
- status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
+ status = acpi_hw_register_read(ACPI_MTX_LOCK,
bit_reg_info->parent_register,
&register_value);
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
-
if (ACPI_SUCCESS(status)) {
+
/* Normalize the value that was read */
register_value =
@@ -311,7 +303,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_register);
+ACPI_EXPORT_SYMBOL(acpi_get_register)
/*******************************************************************************
*
@@ -326,31 +318,28 @@ EXPORT_SYMBOL(acpi_get_register);
*
* DESCRIPTION: ACPI Bit Register write function.
*
+ * NOTE: TBD: Flags parameter is obsolete, to be removed
+ *
******************************************************************************/
-
acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
{
u32 register_value = 0;
struct acpi_bit_register_info *bit_reg_info;
acpi_status status;
+ acpi_cpu_flags lock_flags;
- ACPI_FUNCTION_TRACE_U32("acpi_set_register", register_id);
+ ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
/* Get the info structure corresponding to the requested ACPI Register */
bit_reg_info = acpi_hw_get_bit_register_info(register_id);
if (!bit_reg_info) {
- ACPI_ERROR((AE_INFO, "Bad ACPI HW register_id: %X",
+ ACPI_ERROR((AE_INFO, "Bad ACPI HW RegisterId: %X",
register_id));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (flags & ACPI_MTX_LOCK) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Always do a register read first so we can insert the new bits */
@@ -458,9 +447,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
unlock_and_exit:
- if (flags & ACPI_MTX_LOCK) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
- }
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
/* Normalize the value that was read */
@@ -474,7 +461,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_set_register);
+ACPI_EXPORT_SYMBOL(acpi_set_register)
/******************************************************************************
*
@@ -490,21 +477,18 @@ EXPORT_SYMBOL(acpi_set_register);
* given offset.
*
******************************************************************************/
-
acpi_status
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
{
u32 value1 = 0;
u32 value2 = 0;
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
- ACPI_FUNCTION_TRACE("hw_register_read");
+ ACPI_FUNCTION_TRACE(hw_register_read);
if (ACPI_MTX_LOCK == use_lock) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
@@ -582,7 +566,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
if (ACPI_SUCCESS(status)) {
@@ -610,14 +594,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
{
acpi_status status;
+ acpi_cpu_flags lock_flags = 0;
- ACPI_FUNCTION_TRACE("hw_register_write");
+ ACPI_FUNCTION_TRACE(hw_register_write);
if (ACPI_MTX_LOCK == use_lock) {
- status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
}
switch (register_id) {
@@ -707,7 +689,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) {
- (void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
}
return_ACPI_STATUS(status);
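The same acquire/release pair replaces ACPI_MTX_HARDWARE throughout this file. A minimal sketch of the new pattern, assuming acpi_gbl_hardware_lock has been set up during OSL initialization (not shown in this patch):

    acpi_cpu_flags lock_flags;

    lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
    /* non-sleeping PM1/PM2/GPE register accesses only; unlike the old
     * mutex, this lock may be taken where blocking is not allowed */
    acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);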
@@ -733,7 +715,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
u64 address;
acpi_status status;
- ACPI_FUNCTION_NAME("hw_low_level_read");
+ ACPI_FUNCTION_NAME(hw_low_level_read);
/*
* Must have a valid pointer to a GAS structure, and
@@ -805,7 +787,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
u64 address;
acpi_status status;
- ACPI_FUNCTION_NAME("hw_low_level_write");
+ ACPI_FUNCTION_NAME(hw_low_level_write);
/*
* Must have a valid pointer to a GAS structure, and
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 89269272fd6..8bb43cae60c 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -42,7 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
#include <acpi/acpi.h>
#define _COMPONENT ACPI_HARDWARE
@@ -64,7 +63,7 @@ acpi_status
acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
{
- ACPI_FUNCTION_TRACE("acpi_set_firmware_waking_vector");
+ ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
/* Set the vector */
@@ -79,6 +78,8 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_firmware_waking_vector
@@ -92,13 +93,12 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
* DESCRIPTION: Access function for the firmware_waking_vector field in FACS
*
******************************************************************************/
-
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
{
- ACPI_FUNCTION_TRACE("acpi_get_firmware_waking_vector");
+ ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
if (!physical_address) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -118,6 +118,8 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
return_ACPI_STATUS(AE_OK);
}
+
+ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
#endif
/*******************************************************************************
@@ -134,14 +136,13 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
* various OS-specific tasks between the two steps.
*
******************************************************************************/
-
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
struct acpi_object_list arg_list;
union acpi_object arg;
- ACPI_FUNCTION_TRACE("acpi_enter_sleep_state_prep");
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
/*
* _PSW methods could be run here to enable wake-on keyboard, LAN, etc.
@@ -206,6 +207,8 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
+
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state
@@ -218,7 +221,6 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
u32 PM1Acontrol;
@@ -228,7 +230,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
u32 in_value;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_enter_sleep_state");
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
(acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
@@ -378,7 +380,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_enter_sleep_state);
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
/*******************************************************************************
*
@@ -392,13 +394,12 @@ EXPORT_SYMBOL(acpi_enter_sleep_state);
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-
acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
{
u32 in_value;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_enter_sleep_state_s4bios");
+ ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
status =
acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
@@ -443,7 +444,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios);
+ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
/*******************************************************************************
*
@@ -457,7 +458,6 @@ EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios);
* Called with interrupts ENABLED.
*
******************************************************************************/
-
acpi_status acpi_leave_sleep_state(u8 sleep_state)
{
struct acpi_object_list arg_list;
@@ -468,7 +468,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
u32 PM1Acontrol;
u32 PM1Bcontrol;
- ACPI_FUNCTION_TRACE("acpi_leave_sleep_state");
+ ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
/*
* Set SLP_TYPE and SLP_EN to state S0.
@@ -490,6 +490,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_REGISTER_PM1_CONTROL,
&PM1Acontrol);
if (ACPI_SUCCESS(status)) {
+
/* Clear SLP_EN and SLP_TYP fields */
PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
@@ -583,3 +584,5 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
return_ACPI_STATUS(status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
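Taken together, the exported entry points are intended to be called in roughly the order sketched below. This is a simplified outline rather than the real architecture-specific suspend path, and it assumes S3 plus the interrupt rules stated in the headers above (enter with interrupts disabled, leave with them enabled):

    acpi_status status;

    status = acpi_enter_sleep_state_prep(ACPI_STATE_S3); /* run sleep-prep methods */
    if (ACPI_SUCCESS(status)) {
            local_irq_disable();
            acpi_enter_sleep_state(ACPI_STATE_S3); /* write SLP_TYP/SLP_EN, platform sleeps */
            /* execution resumes here after wakeup */
            local_irq_enable();
            acpi_leave_sleep_state(ACPI_STATE_S3); /* return to S0, run wake methods */
    }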
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
index fc10b7cb456..c4ec47c939f 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -42,7 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
#include <acpi/acpi.h>
#define _COMPONENT ACPI_HARDWARE
@@ -61,13 +60,13 @@ ACPI_MODULE_NAME("hwtimer")
******************************************************************************/
acpi_status acpi_get_timer_resolution(u32 * resolution)
{
- ACPI_FUNCTION_TRACE("acpi_get_timer_resolution");
+ ACPI_FUNCTION_TRACE(acpi_get_timer_resolution);
if (!resolution) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (0 == acpi_gbl_FADT->tmr_val_ext) {
+ if (acpi_gbl_FADT->tmr_val_ext == 0) {
*resolution = 24;
} else {
*resolution = 32;
@@ -76,6 +75,8 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
+
/******************************************************************************
*
* FUNCTION: acpi_get_timer
@@ -87,12 +88,11 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
* DESCRIPTION: Obtains current value of ACPI PM Timer (in ticks).
*
******************************************************************************/
-
acpi_status acpi_get_timer(u32 * ticks)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_get_timer");
+ ACPI_FUNCTION_TRACE(acpi_get_timer);
if (!ticks) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -103,7 +103,7 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_timer);
+ACPI_EXPORT_SYMBOL(acpi_get_timer)
/******************************************************************************
*
@@ -133,7 +133,6 @@ EXPORT_SYMBOL(acpi_get_timer);
* 2**32 Ticks / 3,600,000 Ticks/Sec = 1193 sec or 19.88 minutes
*
******************************************************************************/
-
acpi_status
acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
{
@@ -141,7 +140,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
u32 delta_ticks;
acpi_integer quotient;
- ACPI_FUNCTION_TRACE("acpi_get_timer_duration");
+ ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
if (!time_elapsed) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -154,7 +153,8 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
if (start_ticks < end_ticks) {
delta_ticks = end_ticks - start_ticks;
} else if (start_ticks > end_ticks) {
- if (0 == acpi_gbl_FADT->tmr_val_ext) {
+ if (acpi_gbl_FADT->tmr_val_ext == 0) {
+
/* 24-bit Timer */
delta_ticks =
@@ -183,4 +183,4 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_timer_duration);
+ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
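A minimal sketch of how the three timer exports combine, assuming the usual ACPICA convention that the elapsed value is reported in microseconds; a single counter rollover is handled by the duration helper:

    u32 resolution, start, end, elapsed;

    acpi_get_timer_resolution(&resolution); /* 24 or 32 bits, per FADT TMR_VAL_EXT */
    acpi_get_timer(&start);
    /* ... code being timed ... */
    acpi_get_timer(&end);
    acpi_get_timer_duration(start, end, &elapsed);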
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index 2e2e4051dfa..c25b2b92edc 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -723,6 +723,8 @@ get_parms(char *config_record,
goto do_fail;
count = tmp1 - tmp;
*action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
+ if (!*action_handle)
+ goto do_fail;
strncpy(*action_handle, tmp, count);
*(*action_handle + count) = 0;
diff --git a/drivers/acpi/ibm_acpi.c b/drivers/acpi/ibm_acpi.c
index 262b1f41335..15fc12482ba 100644
--- a/drivers/acpi/ibm_acpi.c
+++ b/drivers/acpi/ibm_acpi.c
@@ -567,6 +567,69 @@ static int bluetooth_write(char *buf)
return 0;
}
+static int wan_supported;
+
+static int wan_init(void)
+{
+ wan_supported = hkey_handle &&
+ acpi_evalf(hkey_handle, NULL, "GWAN", "qv");
+
+ return 0;
+}
+
+static int wan_status(void)
+{
+ int status;
+
+ if (!wan_supported ||
+ !acpi_evalf(hkey_handle, &status, "GWAN", "d"))
+ status = 0;
+
+ return status;
+}
+
+static int wan_read(char *p)
+{
+ int len = 0;
+ int status = wan_status();
+
+ if (!wan_supported)
+ len += sprintf(p + len, "status:\t\tnot supported\n");
+ else if (!(status & 1))
+ len += sprintf(p + len, "status:\t\tnot installed\n");
+ else {
+ len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 1));
+ len += sprintf(p + len, "commands:\tenable, disable\n");
+ }
+
+ return len;
+}
+
+static int wan_write(char *buf)
+{
+ int status = wan_status();
+ char *cmd;
+ int do_cmd = 0;
+
+ if (!wan_supported)
+ return -ENODEV;
+
+ while ((cmd = next_cmd(&buf))) {
+ if (strlencmp(cmd, "enable") == 0) {
+ status |= 2;
+ } else if (strlencmp(cmd, "disable") == 0) {
+ status &= ~2;
+ } else
+ return -EINVAL;
+ do_cmd = 1;
+ }
+
+ if (do_cmd && !acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
+ return -EIO;
+
+ return 0;
+}
+
static int video_supported;
static int video_orig_autosw;
@@ -1563,6 +1626,13 @@ static struct ibm_struct ibms[] = {
.write = bluetooth_write,
},
{
+ .name = "wan",
+ .init = wan_init,
+ .read = wan_read,
+ .write = wan_write,
+ .experimental = 1,
+ },
+ {
.name = "video",
.init = video_init,
.read = video_read,
diff --git a/drivers/acpi/motherboard.c b/drivers/acpi/motherboard.c
index 468244147ec..d51d68f5dd8 100644
--- a/drivers/acpi/motherboard.c
+++ b/drivers/acpi/motherboard.c
@@ -37,7 +37,7 @@ ACPI_MODULE_NAME("acpi_motherboard")
#define ACPI_MB_HID2 "PNP0C02"
/**
* Doesn't care about legacy IO ports, only IO ports beyond 0x1000 are reserved
- * Doesn't care about the failure of 'request_region', since other may reserve
+ * Doesn't care about the failure of 'request_region', since others may reserve
* the io ports as well
*/
#define IS_RESERVED_ADDR(base, len) \
@@ -46,7 +46,7 @@ ACPI_MODULE_NAME("acpi_motherboard")
/*
* Clearing the flag (IORESOURCE_BUSY) allows drivers to use
* the io ports if they really know they can use it, while
- * still preventing hotplug PCI devices from using it.
+ * still preventing hotplug PCI devices from using it.
*/
static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
{
@@ -123,49 +123,54 @@ static struct acpi_driver acpi_motherboard_driver2 = {
},
};
+static void __init acpi_request_region (struct acpi_generic_address *addr,
+ unsigned int length, char *desc)
+{
+ if (!addr->address || !length)
+ return;
+
+ if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr->address, length, desc);
+ else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr->address, length, desc);
+}
+
static void __init acpi_reserve_resources(void)
{
- if (acpi_gbl_FADT->xpm1a_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
- request_region(acpi_gbl_FADT->xpm1a_evt_blk.address,
- acpi_gbl_FADT->pm1_evt_len, "PM1a_EVT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
+ acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
- if (acpi_gbl_FADT->xpm1b_evt_blk.address && acpi_gbl_FADT->pm1_evt_len)
- request_region(acpi_gbl_FADT->xpm1b_evt_blk.address,
- acpi_gbl_FADT->pm1_evt_len, "PM1b_EVT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
+ acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
- if (acpi_gbl_FADT->xpm1a_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
- request_region(acpi_gbl_FADT->xpm1a_cnt_blk.address,
- acpi_gbl_FADT->pm1_cnt_len, "PM1a_CNT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
+ acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
- if (acpi_gbl_FADT->xpm1b_cnt_blk.address && acpi_gbl_FADT->pm1_cnt_len)
- request_region(acpi_gbl_FADT->xpm1b_cnt_blk.address,
- acpi_gbl_FADT->pm1_cnt_len, "PM1b_CNT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
+ acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
- if (acpi_gbl_FADT->xpm_tmr_blk.address && acpi_gbl_FADT->pm_tm_len == 4)
- request_region(acpi_gbl_FADT->xpm_tmr_blk.address, 4, "PM_TMR");
+ if (acpi_gbl_FADT->pm_tm_len == 4)
+ acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
- if (acpi_gbl_FADT->xpm2_cnt_blk.address && acpi_gbl_FADT->pm2_cnt_len)
- request_region(acpi_gbl_FADT->xpm2_cnt_blk.address,
- acpi_gbl_FADT->pm2_cnt_len, "PM2_CNT_BLK");
+ acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
+ acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
/* Length of GPE blocks must be a non-negative multiple of 2 */
- if (acpi_gbl_FADT->xgpe0_blk.address && acpi_gbl_FADT->gpe0_blk_len &&
- !(acpi_gbl_FADT->gpe0_blk_len & 0x1))
- request_region(acpi_gbl_FADT->xgpe0_blk.address,
- acpi_gbl_FADT->gpe0_blk_len, "GPE0_BLK");
+ if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
+ acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
+ acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
- if (acpi_gbl_FADT->xgpe1_blk.address && acpi_gbl_FADT->gpe1_blk_len &&
- !(acpi_gbl_FADT->gpe1_blk_len & 0x1))
- request_region(acpi_gbl_FADT->xgpe1_blk.address,
- acpi_gbl_FADT->gpe1_blk_len, "GPE1_BLK");
+ if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
+ acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
+ acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
}
static int __init acpi_motherboard_init(void)
{
acpi_bus_register_driver(&acpi_motherboard_driver1);
acpi_bus_register_driver(&acpi_motherboard_driver2);
- /*
+ /*
* Guarantee motherboard IO reservation first
* This module must run after scan.c
*/
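For reference, one call to the new helper stands in for the open-coded check it replaces. A rough expansion for the PM1a event block, using only fields already visible in this hunk:

    /* acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
     *                     acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
     * is approximately equivalent to: */
    struct acpi_generic_address *gas = &acpi_gbl_FADT->xpm1a_evt_blk;

    if (gas->address && acpi_gbl_FADT->pm1_evt_len) {
            if (gas->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                    request_region(gas->address, acpi_gbl_FADT->pm1_evt_len,
                                   "ACPI PM1a_EVT_BLK");
            else if (gas->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                    request_mem_region(gas->address, acpi_gbl_FADT->pm1_evt_len,
                                       "ACPI PM1a_EVT_BLK");
    }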
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index 1149bc18fb3..48fadade52e 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -70,7 +70,7 @@ acpi_status acpi_ns_root_initialize(void)
union acpi_operand_object *obj_desc;
acpi_string val = NULL;
- ACPI_FUNCTION_TRACE("ns_root_initialize");
+ ACPI_FUNCTION_TRACE(ns_root_initialize);
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -98,6 +98,7 @@ acpi_status acpi_ns_root_initialize(void)
"Entering predefined entries into namespace\n"));
for (init_val = acpi_gbl_pre_defined_names; init_val->name; init_val++) {
+
/* _OSI is optional for now, will be permanent later */
if (!ACPI_STRCMP(init_val->name, "_OSI")
@@ -156,7 +157,7 @@ acpi_status acpi_ns_root_initialize(void)
#if defined (ACPI_ASL_COMPILER)
- /* save the parameter count for the i_aSL compiler */
+ /* Save the parameter count for the i_aSL compiler */
new_node->value = obj_desc->method.param_count;
#else
@@ -258,10 +259,8 @@ acpi_status acpi_ns_root_initialize(void)
/* Save a handle to "_GPE", it is always present */
if (ACPI_SUCCESS(status)) {
- status =
- acpi_ns_get_node_by_path("\\_GPE", NULL,
- ACPI_NS_NO_UPSEARCH,
- &acpi_gbl_fadt_gpe_device);
+ status = acpi_ns_get_node(NULL, "\\_GPE", ACPI_NS_NO_UPSEARCH,
+ &acpi_gbl_fadt_gpe_device);
}
return_ACPI_STATUS(status);
@@ -310,17 +309,17 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
acpi_object_type type_to_check_for;
acpi_object_type this_search_type;
u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
- u32 local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND |
- ACPI_NS_SEARCH_PARENT);
+ u32 local_flags;
- ACPI_FUNCTION_TRACE("ns_lookup");
+ ACPI_FUNCTION_TRACE(ns_lookup);
if (!return_node) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- acpi_gbl_ns_lookup_count++;
+ local_flags = flags & ~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_SEARCH_PARENT);
*return_node = ACPI_ENTRY_NOT_FOUND;
+ acpi_gbl_ns_lookup_count++;
if (!acpi_gbl_root_node) {
return_ACPI_STATUS(AE_NO_NAMESPACE);
@@ -346,14 +345,17 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
return_ACPI_STATUS(AE_AML_INTERNAL);
}
- /*
- * This node might not be a actual "scope" node (such as a
- * Device/Method, etc.) It could be a Package or other object node.
- * Backup up the tree to find the containing scope node.
- */
- while (!acpi_ns_opens_scope(prefix_node->type) &&
- prefix_node->type != ACPI_TYPE_ANY) {
- prefix_node = acpi_ns_get_parent_node(prefix_node);
+ if (!(flags & ACPI_NS_PREFIX_IS_SCOPE)) {
+ /*
+ * This node might not be an actual "scope" node (such as a
+ * Device/Method, etc.) It could be a Package or other object node.
+ * Back up the tree to find the containing scope node.
+ */
+ while (!acpi_ns_opens_scope(prefix_node->type) &&
+ prefix_node->type != ACPI_TYPE_ANY) {
+ prefix_node =
+ acpi_ns_get_parent_node(prefix_node);
+ }
}
}
@@ -365,6 +367,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
* Begin examination of the actual pathname
*/
if (!pathname) {
+
/* A Null name_path is allowed and refers to the root */
num_segments = 0;
@@ -389,6 +392,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
* to the current scope).
*/
if (*path == (u8) AML_ROOT_PREFIX) {
+
/* Pathname is fully qualified, start from the root */
this_node = acpi_gbl_root_node;
@@ -416,6 +420,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node = prefix_node;
num_carats = 0;
while (*path == (u8) AML_PARENT_PREFIX) {
+
/* Name is fully qualified, no search rules apply */
search_parent_flag = ACPI_NS_NO_UPSEARCH;
@@ -430,6 +435,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = acpi_ns_get_parent_node(this_node);
if (!this_node) {
+
/* Current scope has no parent scope */
ACPI_ERROR((AE_INFO,
@@ -569,6 +575,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
&this_node);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
+
/* Name not found in ACPI namespace */
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
@@ -602,10 +609,11 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
(type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) &&
(this_node->type != ACPI_TYPE_ANY) &&
(this_node->type != type_to_check_for)) {
+
/* Complain about a type mismatch */
ACPI_WARNING((AE_INFO,
- "ns_lookup: Type mismatch on %4.4s (%s), searching for (%s)",
+ "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
ACPI_CAST_PTR(char, &simple_name),
acpi_ut_get_type_name(this_node->type),
acpi_ut_get_type_name
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 9b871f38b61..dc3f0739a46 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -47,9 +47,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsalloc")
-/* Local prototypes */
-static void acpi_ns_remove_reference(struct acpi_namespace_node *node);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_create_node
@@ -61,14 +58,13 @@ static void acpi_ns_remove_reference(struct acpi_namespace_node *node);
* DESCRIPTION: Create a namespace node
*
******************************************************************************/
-
struct acpi_namespace_node *acpi_ns_create_node(u32 name)
{
struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("ns_create_node");
+ ACPI_FUNCTION_TRACE(ns_create_node);
- node = ACPI_MEM_CALLOCATE(sizeof(struct acpi_namespace_node));
+ node = acpi_os_acquire_object(acpi_gbl_namespace_cache);
if (!node) {
return_PTR(NULL);
}
@@ -76,9 +72,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
node->name.integer = name;
- node->reference_count = 1;
ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
-
return_PTR(node);
}
@@ -100,7 +94,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
struct acpi_namespace_node *prev_node;
struct acpi_namespace_node *next_node;
- ACPI_FUNCTION_TRACE_PTR("ns_delete_node", node);
+ ACPI_FUNCTION_TRACE_PTR(ns_delete_node, node);
parent_node = acpi_ns_get_parent_node(node);
@@ -115,6 +109,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
}
if (prev_node) {
+
/* Node is not first child, unlink it */
prev_node->peer = next_node->peer;
@@ -125,6 +120,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
/* Node is first child (has no previous peer) */
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+
/* No peers at all */
parent_node->child = NULL;
@@ -137,10 +133,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_freed++);
/*
- * Detach an object if there is one then delete the node
+ * Detach an object if there is one, then delete the node
*/
acpi_ns_detach_object(node);
- ACPI_MEM_FREE(node);
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache, node);
return_VOID;
}
@@ -171,7 +167,7 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
acpi_owner_id owner_id = 0;
struct acpi_namespace_node *child_node;
- ACPI_FUNCTION_TRACE("ns_install_node");
+ ACPI_FUNCTION_TRACE(ns_install_node);
/*
* Get the owner ID from the Walk state
@@ -216,14 +212,6 @@ void acpi_ns_install_node(struct acpi_walk_state *walk_state, struct acpi_namesp
acpi_ut_get_type_name(parent_node->type),
parent_node));
- /*
- * Increment the reference count(s) of all parents up to
- * the root!
- */
- while ((node = acpi_ns_get_parent_node(node)) != NULL) {
- node->reference_count++;
- }
-
return_VOID;
}
@@ -244,10 +232,9 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
{
struct acpi_namespace_node *child_node;
struct acpi_namespace_node *next_node;
- struct acpi_namespace_node *node;
u8 flags;
- ACPI_FUNCTION_TRACE_PTR("ns_delete_children", parent_node);
+ ACPI_FUNCTION_TRACE_PTR(ns_delete_children, parent_node);
if (!parent_node) {
return_VOID;
@@ -264,6 +251,7 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
* Deallocate all children at this level
*/
do {
+
/* Get the things we need */
next_node = child_node->peer;
@@ -289,26 +277,10 @@ void acpi_ns_delete_children(struct acpi_namespace_node *parent_node)
*/
acpi_ns_detach_object(child_node);
- /*
- * Decrement the reference count(s) of all parents up to
- * the root! (counts were incremented when the node was created)
- */
- node = child_node;
- while ((node = acpi_ns_get_parent_node(node)) != NULL) {
- node->reference_count--;
- }
-
- /* There should be only one reference remaining on this node */
-
- if (child_node->reference_count != 1) {
- ACPI_WARNING((AE_INFO,
- "Existing references (%d) on node being deleted (%p)",
- child_node->reference_count, child_node));
- }
-
/* Now we can delete the node */
- ACPI_MEM_FREE(child_node);
+ (void)acpi_os_release_object(acpi_gbl_namespace_cache,
+ child_node);
/* And move on to the next child in the list */
@@ -341,7 +313,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
struct acpi_namespace_node *child_node = NULL;
u32 level = 1;
- ACPI_FUNCTION_TRACE("ns_delete_namespace_subtree");
+ ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
if (!parent_node) {
return_VOID;
@@ -352,11 +324,14 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
* to where we started.
*/
while (level > 0) {
+
/* Get the next node in this scope (NULL if none) */
- child_node = acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
- child_node);
+ child_node =
+ acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
+ child_node);
if (child_node) {
+
/* Found a child node - detach any attached object */
acpi_ns_detach_object(child_node);
@@ -401,55 +376,6 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)
/*******************************************************************************
*
- * FUNCTION: acpi_ns_remove_reference
- *
- * PARAMETERS: Node - Named node whose reference count is to be
- * decremented
- *
- * RETURN: None.
- *
- * DESCRIPTION: Remove a Node reference. Decrements the reference count
- * of all parent Nodes up to the root. Any node along
- * the way that reaches zero references is freed.
- *
- ******************************************************************************/
-
-static void acpi_ns_remove_reference(struct acpi_namespace_node *node)
-{
- struct acpi_namespace_node *parent_node;
- struct acpi_namespace_node *this_node;
-
- ACPI_FUNCTION_ENTRY();
-
- /*
- * Decrement the reference count(s) of this node and all
- * nodes up to the root, Delete anything with zero remaining references.
- */
- this_node = node;
- while (this_node) {
- /* Prepare to move up to parent */
-
- parent_node = acpi_ns_get_parent_node(this_node);
-
- /* Decrement the reference count on this node */
-
- this_node->reference_count--;
-
- /* Delete the node if no more references */
-
- if (!this_node->reference_count) {
- /* Delete all children and delete the node */
-
- acpi_ns_delete_children(this_node);
- acpi_ns_delete_node(this_node);
- }
-
- this_node = parent_node;
- }
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ns_delete_namespace_by_owner
*
* PARAMETERS: owner_id - All nodes with this owner will be deleted
@@ -469,15 +395,15 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
u32 level;
struct acpi_namespace_node *parent_node;
- ACPI_FUNCTION_TRACE_U32("ns_delete_namespace_by_owner", owner_id);
+ ACPI_FUNCTION_TRACE_U32(ns_delete_namespace_by_owner, owner_id);
if (owner_id == 0) {
return_VOID;
}
+ deletion_node = NULL;
parent_node = acpi_gbl_root_node;
child_node = NULL;
- deletion_node = NULL;
level = 1;
/*
@@ -494,12 +420,14 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
child_node);
if (deletion_node) {
- acpi_ns_remove_reference(deletion_node);
+ acpi_ns_delete_children(deletion_node);
+ acpi_ns_delete_node(deletion_node);
deletion_node = NULL;
}
if (child_node) {
if (child_node->owner_id == owner_id) {
+
/* Found a matching child node - detach any attached object */
acpi_ns_detach_object(child_node);
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index a2807317a84..d72df66aa96 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -75,7 +75,7 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
{
acpi_native_uint i;
- ACPI_FUNCTION_NAME("ns_print_pathname");
+ ACPI_FUNCTION_NAME(ns_print_pathname);
if (!(acpi_dbg_level & ACPI_LV_NAMES)
|| !(acpi_dbg_layer & ACPI_NAMESPACE)) {
@@ -123,7 +123,7 @@ void
acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
{
- ACPI_FUNCTION_TRACE("ns_dump_pathname");
+ ACPI_FUNCTION_TRACE(ns_dump_pathname);
/* Do this only if the requested debug level and component are enabled */
@@ -167,7 +167,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
u32 dbg_level;
u32 i;
- ACPI_FUNCTION_NAME("ns_dump_one_object");
+ ACPI_FUNCTION_NAME(ns_dump_one_object);
/* Is output enabled? */
@@ -191,6 +191,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
}
if (!(info->display_type & ACPI_DISPLAY_SHORT)) {
+
/* Indent the object according to the level */
acpi_os_printf("%2d%*s", (u32) level - 1, (int)level * 2, " ");
@@ -203,6 +204,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
}
if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
+ this_node->name.integer =
+ acpi_ut_repair_name(this_node->name.integer);
+
ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
this_node->name.integer));
}
@@ -226,6 +230,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_DISPLAY_SUMMARY:
if (!obj_desc) {
+
/* No attached object, we are done */
acpi_os_printf("\n");
@@ -419,6 +424,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
acpi_os_printf("O:%p", obj_desc);
if (!obj_desc) {
+
/* No attached object, we are done */
acpi_os_printf("\n");
@@ -669,7 +675,7 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
{
acpi_handle search_handle = search_base;
- ACPI_FUNCTION_TRACE("ns_dump_tables");
+ ACPI_FUNCTION_TRACE(ns_dump_tables);
if (!acpi_gbl_root_node) {
/*
@@ -682,6 +688,7 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth)
}
if (ACPI_NS_ALL == search_base) {
+
/* Entire namespace */
search_handle = acpi_gbl_root_node;
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
index aff899a935e..c6bf5d30fca 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -74,7 +74,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
acpi_status status;
u32 i;
- ACPI_FUNCTION_NAME("ns_dump_one_device");
+ ACPI_FUNCTION_NAME(ns_dump_one_device);
status =
acpi_ns_dump_one_object(obj_handle, level, context, return_value);
@@ -92,7 +92,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
info->hardware_id.value,
ACPI_FORMAT_UINT64(info->address),
info->current_status));
- ACPI_MEM_FREE(info);
+ ACPI_FREE(info);
}
return (status);
@@ -115,7 +115,7 @@ void acpi_ns_dump_root_devices(void)
acpi_handle sys_bus_handle;
acpi_status status;
- ACPI_FUNCTION_NAME("ns_dump_root_devices");
+ ACPI_FUNCTION_NAME(ns_dump_root_devices);
/* Only dump the table if tracing is enabled */
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 19d7b94d40c..4b0a4a8c984 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -1,7 +1,6 @@
/*******************************************************************************
*
- * Module Name: nseval - Object evaluation interfaces -- includes control
- * method lookup and execution.
+ * Module Name: nseval - Object evaluation, includes control method execution
*
******************************************************************************/
@@ -50,196 +49,14 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nseval")
-/* Local prototypes */
-static acpi_status
-acpi_ns_execute_control_method(struct acpi_parameter_info *info);
-
-static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_evaluate_relative
- *
- * PARAMETERS: Pathname - Name of method to execute, If NULL, the
- * handle is the object to execute
- * Info - Method info block, contains:
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- * Params - List of parameters to pass to the method,
- * terminated by NULL. Params itself may be
- * NULL if no parameters are being passed.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Evaluate the object or find and execute the requested method
- *
- * MUTEX: Locks Namespace
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_evaluate_relative(char *pathname, struct acpi_parameter_info *info)
-{
- acpi_status status;
- struct acpi_namespace_node *node = NULL;
- union acpi_generic_state *scope_info;
- char *internal_path = NULL;
-
- ACPI_FUNCTION_TRACE("ns_evaluate_relative");
-
- /*
- * Must have a valid object handle
- */
- if (!info || !info->node) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
- /* Build an internal name string for the method */
-
- status = acpi_ns_internalize_name(pathname, &internal_path);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- scope_info = acpi_ut_create_generic_state();
- if (!scope_info) {
- goto cleanup1;
- }
-
- /* Get the prefix handle and Node */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- info->node = acpi_ns_map_handle_to_node(info->node);
- if (!info->node) {
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- status = AE_BAD_PARAMETER;
- goto cleanup;
- }
-
- /* Lookup the name in the namespace */
-
- scope_info->scope.node = info->node;
- status = acpi_ns_lookup(scope_info, internal_path, ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
- &node);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "Object [%s] not found [%s]\n",
- pathname, acpi_format_exception(status)));
- goto cleanup;
- }
-
- /*
- * Now that we have a handle to the object, we can attempt to evaluate it.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n",
- pathname, node, acpi_ns_get_attached_object(node)));
-
- info->node = node;
- status = acpi_ns_evaluate_by_handle(info);
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "*** Completed eval of object %s ***\n", pathname));
-
- cleanup:
- acpi_ut_delete_generic_state(scope_info);
-
- cleanup1:
- ACPI_MEM_FREE(internal_path);
- return_ACPI_STATUS(status);
-}
-
/*******************************************************************************
*
- * FUNCTION: acpi_ns_evaluate_by_name
+ * FUNCTION: acpi_ns_evaluate
*
- * PARAMETERS: Pathname - Fully qualified pathname to the object
- * Info - Method info block, contains:
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- * Params - List of parameters to pass to the method,
- * terminated by NULL. Params itself may be
- * NULL if no parameters are being passed.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Evaluate the object or rind and execute the requested method
- * passing the given parameters
- *
- * MUTEX: Locks Namespace
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info)
-{
- acpi_status status;
- char *internal_path = NULL;
-
- ACPI_FUNCTION_TRACE("ns_evaluate_by_name");
-
- /* Build an internal name string for the method */
-
- status = acpi_ns_internalize_name(pathname, &internal_path);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /* Lookup the name in the namespace */
-
- status = acpi_ns_lookup(NULL, internal_path, ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE, ACPI_NS_NO_UPSEARCH, NULL,
- &info->node);
-
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
- if (ACPI_FAILURE(status)) {
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "Object at [%s] was not found, status=%.4X\n",
- pathname, status));
- goto cleanup;
- }
-
- /*
- * Now that we have a handle to the object, we can attempt to evaluate it.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n",
- pathname, info->node,
- acpi_ns_get_attached_object(info->node)));
-
- status = acpi_ns_evaluate_by_handle(info);
-
- ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
- "*** Completed eval of object %s ***\n", pathname));
-
- cleanup:
-
- /* Cleanup */
-
- if (internal_path) {
- ACPI_MEM_FREE(internal_path);
- }
-
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_evaluate_by_handle
- *
- * PARAMETERS: Info - Method info block, contains:
- * Node - Method/Object Node to execute
+ * PARAMETERS: Info - Evaluation info block, contains:
+ * prefix_node - Prefix or Method/Object Node to execute
+ * Pathname - Name of method to execute, If NULL, the
+ * Node is the object to execute
* Parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
@@ -248,29 +65,21 @@ acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info)
* parameter_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
+ * Flags - ACPI_IGNORE_RETURN_VALUE to delete return
*
* RETURN: Status
*
- * DESCRIPTION: Evaluate object or execute the requested method passing the
- * given parameters
+ * DESCRIPTION: Execute a control method or return the current value of an
+ * ACPI namespace object.
*
- * MUTEX: Locks Namespace
+ * MUTEX: Locks interpreter
*
******************************************************************************/
-
-acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info)
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_evaluate_by_handle");
-
- /* Check if namespace has been initialized */
-
- if (!acpi_gbl_root_node) {
- return_ACPI_STATUS(AE_NO_NAMESPACE);
- }
-
- /* Parameter Validation */
+ ACPI_FUNCTION_TRACE(ns_evaluate);
if (!info) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -280,202 +89,120 @@ acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info)
info->return_object = NULL;
- /* Get the prefix handle and Node */
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ /*
+ * Get the actual namespace node for the target object. Handles these cases:
+ *
+ * 1) Null node, Pathname (absolute path)
+ * 2) Node, Pathname (path relative to Node)
+ * 3) Node, Null Pathname
+ */
+ status = acpi_ns_get_node(info->prefix_node, info->pathname,
+ ACPI_NS_NO_UPSEARCH, &info->resolved_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- info->node = acpi_ns_map_handle_to_node(info->node);
- if (!info->node) {
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
/*
* For a method alias, we must grab the actual method node so that proper
* scoping context will be established before execution.
*/
- if (acpi_ns_get_type(info->node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
- info->node =
+ if (acpi_ns_get_type(info->resolved_node) ==
+ ACPI_TYPE_LOCAL_METHOD_ALIAS) {
+ info->resolved_node =
ACPI_CAST_PTR(struct acpi_namespace_node,
- info->node->object);
+ info->resolved_node->object);
}
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES, "%s [%p] Value %p\n", info->pathname,
+ info->resolved_node,
+ acpi_ns_get_attached_object(info->resolved_node)));
+
/*
* Two major cases here:
- * 1) The object is an actual control method -- execute it.
- * 2) The object is not a method -- just return it's current value
*
- * In both cases, the namespace is unlocked by the acpi_ns* procedure
+ * 1) The object is a control method -- execute it
+ * 2) The object is not a method -- just return its current value
*/
- if (acpi_ns_get_type(info->node) == ACPI_TYPE_METHOD) {
- /*
- * Case 1) We have an actual control method to execute
- */
- status = acpi_ns_execute_control_method(info);
- } else {
+ if (acpi_ns_get_type(info->resolved_node) == ACPI_TYPE_METHOD) {
/*
- * Case 2) Object is NOT a method, just return its current value
+ * 1) Object is a control method - execute it
*/
- status = acpi_ns_get_object_value(info);
- }
-
- /*
- * Check if there is a return value on the stack that must be dealt with
- */
- if (status == AE_CTRL_RETURN_VALUE) {
- /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
-
- status = AE_OK;
- }
-
- /*
- * Namespace was unlocked by the handling acpi_ns* function, so we
- * just return
- */
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_execute_control_method
- *
- * PARAMETERS: Info - Method info block, contains:
- * Node - Method Node to execute
- * obj_desc - Method object
- * Parameters - List of parameters to pass to the method,
- * terminated by NULL. Params itself may be
- * NULL if no parameters are being passed.
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- * parameter_type - Type of Parameter list
- * return_object - Where to put method's return value (if
- * any). If NULL, no value is returned.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute the requested method passing the given parameters
- *
- * MUTEX: Assumes namespace is locked
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_execute_control_method(struct acpi_parameter_info *info)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE("ns_execute_control_method");
-
- /* Verify that there is a method associated with this object */
-
- info->obj_desc = acpi_ns_get_attached_object(info->node);
- if (!info->obj_desc) {
- ACPI_ERROR((AE_INFO, "No attached method object"));
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- return_ACPI_STATUS(AE_NULL_OBJECT);
- }
-
- ACPI_DUMP_PATHNAME(info->node, "Execute Method:",
- ACPI_LV_INFO, _COMPONENT);
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Method at AML address %p Length %X\n",
- info->obj_desc->method.aml_start + 1,
- info->obj_desc->method.aml_length - 1));
-
- /*
- * Unlock the namespace before execution. This allows namespace access
- * via the external Acpi* interfaces while a method is being executed.
- * However, any namespace deletion must acquire both the namespace and
- * interpreter locks to ensure that no thread is using the portion of the
- * namespace that is being deleted.
- */
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ /* Verify that there is a method object associated with this node */
- /*
- * Execute the method via the interpreter. The interpreter is locked
- * here before calling into the AML parser
- */
- status = acpi_ex_enter_interpreter();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ info->obj_desc =
+ acpi_ns_get_attached_object(info->resolved_node);
+ if (!info->obj_desc) {
+ ACPI_ERROR((AE_INFO,
+ "Control method has no attached sub-object"));
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
- status = acpi_ps_execute_method(info);
- acpi_ex_exit_interpreter();
+ ACPI_DUMP_PATHNAME(info->resolved_node, "Execute Method:",
+ ACPI_LV_INFO, _COMPONENT);
- return_ACPI_STATUS(status);
-}
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Method at AML address %p Length %X\n",
+ info->obj_desc->method.aml_start + 1,
+ info->obj_desc->method.aml_length - 1));
-/*******************************************************************************
- *
- * FUNCTION: acpi_ns_get_object_value
- *
- * PARAMETERS: Info - Method info block, contains:
- * Node - Object's NS node
- * return_object - Where to put object value (if
- * any). If NULL, no value is returned.
- *
- * RETURN: Status
- *
- * DESCRIPTION: Return the current value of the object
- *
- * MUTEX: Assumes namespace is locked, leaves namespace unlocked
- *
- ******************************************************************************/
+ /*
+ * Any namespace deletion must acquire both the namespace and
+ * interpreter locks to ensure that no thread is using the portion of
+ * the namespace that is being deleted.
+ *
+ * Execute the method via the interpreter. The interpreter is locked
+ * here before calling into the AML parser
+ */
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
-static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info)
-{
- acpi_status status = AE_OK;
- struct acpi_namespace_node *resolved_node = info->node;
+ status = acpi_ps_execute_method(info);
+ acpi_ex_exit_interpreter();
+ } else {
+ /*
+ * 2) Object is not a method, return its current value
+ */
- ACPI_FUNCTION_TRACE("ns_get_object_value");
+ /*
+ * Objects require additional resolution steps (e.g., the Node may be
+ * a field that must be read, etc.) -- we can't just grab the object
+ * out of the node.
+ *
+ * Use resolve_node_to_value() to get the associated value.
+ *
+ * NOTE: we can get away with passing in NULL for a walk state because
+ * resolved_node is guaranteed to not be a reference to either a method
+ * local or a method argument (because this interface is never called
+ * from a running method.)
+ *
+ * Even though we do not directly invoke the interpreter for object
+ * resolution, we must lock it because we could access an opregion.
+ * The opregion access code assumes that the interpreter is locked.
+ */
+ status = acpi_ex_enter_interpreter();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
- /*
- * Objects require additional resolution steps (e.g., the Node may be a
- * field that must be read, etc.) -- we can't just grab the object out of
- * the node.
- */
+ /* Function has a strange interface */
- /*
- * Use resolve_node_to_value() to get the associated value. This call always
- * deletes obj_desc (allocated above).
- *
- * NOTE: we can get away with passing in NULL for a walk state because
- * obj_desc is guaranteed to not be a reference to either a method local or
- * a method argument (because this interface can only be called from the
- * acpi_evaluate external interface, never called from a running method.)
- *
- * Even though we do not directly invoke the interpreter for this, we must
- * enter it because we could access an opregion. The opregion access code
- * assumes that the interpreter is locked.
- *
- * We must release the namespace lock before entering the intepreter.
- */
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
+ status =
+ acpi_ex_resolve_node_to_value(&info->resolved_node, NULL);
+ acpi_ex_exit_interpreter();
- status = acpi_ex_enter_interpreter();
- if (ACPI_SUCCESS(status)) {
- status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
/*
* If acpi_ex_resolve_node_to_value() succeeded, the return value was placed
* in resolved_node.
*/
- acpi_ex_exit_interpreter();
-
if (ACPI_SUCCESS(status)) {
status = AE_CTRL_RETURN_VALUE;
- info->return_object = ACPI_CAST_PTR
- (union acpi_operand_object, resolved_node);
+ info->return_object =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ info->resolved_node);
+
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Returning object %p [%s]\n",
info->return_object,
@@ -484,7 +211,30 @@ static acpi_status acpi_ns_get_object_value(struct acpi_parameter_info *info)
}
}
- /* Namespace is unlocked */
+ /*
+ * Check if there is a return value that must be dealt with
+ */
+ if (status == AE_CTRL_RETURN_VALUE) {
+
+ /* If caller does not want the return value, delete it */
+ if (info->flags & ACPI_IGNORE_RETURN_VALUE) {
+ acpi_ut_remove_reference(info->return_object);
+ info->return_object = NULL;
+ }
+
+ /* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
+
+ status = AE_OK;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "*** Completed evaluation of object %s ***\n",
+ info->pathname));
+
+ /*
+ * Namespace was unlocked by the handling acpi_ns* function, so we
+ * just return
+ */
return_ACPI_STATUS(status);
}
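A minimal, self-contained sketch (not part of the patch) of the return-value handling pattern introduced above: the internal AE_CTRL_RETURN_VALUE status marks that the evaluation produced a value, the caller can ask for that value to be discarded via an ignore flag, and the control status is then mapped back to plain success. All type, constant, and function names in the sketch are simplified stand-ins, not the real ACPICA definitions.

#include <stdio.h>
#include <stdlib.h>

typedef int status_t;
#define OK                  0
#define CTRL_RETURN_VALUE   1       /* "a value was produced" marker */
#define IGNORE_RETURN_VALUE 0x01    /* caller does not want the value */

struct eval_info {
	unsigned int flags;
	void *return_object;            /* refcounted object in real ACPICA */
};

static status_t finish_evaluation(struct eval_info *info, status_t status)
{
	if (status == CTRL_RETURN_VALUE) {
		/* If the caller does not want the return value, delete it */
		if (info->flags & IGNORE_RETURN_VALUE) {
			free(info->return_object);
			info->return_object = NULL;
		}
		/* The control status is internal; report plain success */
		status = OK;
	}
	return status;
}

int main(void)
{
	struct eval_info info = {
		.flags = IGNORE_RETURN_VALUE,
		.return_object = malloc(16),
	};
	status_t status = finish_evaluation(&info, CTRL_RETURN_VALUE);

	printf("status=%d object=%p\n", status, info.return_object);
	return 0;
}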
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index 9f929e479fd..aec8488c001 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -58,6 +58,10 @@ static acpi_status
acpi_ns_init_one_device(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_initialize_objects
@@ -76,7 +80,7 @@ acpi_status acpi_ns_initialize_objects(void)
acpi_status status;
struct acpi_init_walk_info info;
- ACPI_FUNCTION_TRACE("ns_initialize_objects");
+ ACPI_FUNCTION_TRACE(ns_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@@ -93,7 +97,7 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_UINT32_MAX, acpi_ns_init_one_object,
&info, NULL);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
+ ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
@@ -133,7 +137,7 @@ acpi_status acpi_ns_initialize_devices(void)
acpi_status status;
struct acpi_device_walk_info info;
- ACPI_FUNCTION_TRACE("ns_initialize_devices");
+ ACPI_FUNCTION_TRACE(ns_initialize_devices);
/* Init counters */
@@ -142,30 +146,46 @@ acpi_status acpi_ns_initialize_devices(void)
info.num_INI = 0;
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Executing all Device _STA and_INI methods:"));
+ "Initializing Device/Processor/Thermal objects by executing _INI methods:"));
+
+ /* Tree analysis: find all subtrees that contain _INI methods */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_find_ini_methods, &info, NULL);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto error_exit;
}
- /* Walk namespace for all objects */
+ /* Allocate the evaluation information block */
+
+ info.evaluate_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info.evaluate_info) {
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
+
+ /* Walk namespace to execute all _INIs on present devices */
status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, TRUE,
+ ACPI_UINT32_MAX, FALSE,
acpi_ns_init_one_device, &info, NULL);
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-
+ ACPI_FREE(info.evaluate_info);
if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
+ goto error_exit;
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "\n%hd Devices found - executed %hd _STA, %hd _INI methods\n",
- info.device_count, info.num_STA, info.num_INI));
+ "\nExecuted %hd _INI methods requiring %hd _STA executions (examined %hd objects)\n",
+ info.num_INI, info.num_STA, info.device_count));
return_ACPI_STATUS(status);
+
+ error_exit:
+ ACPI_EXCEPTION((AE_INFO, status, "During device initialization"));
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -200,7 +220,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
(struct acpi_namespace_node *)obj_handle;
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_NAME("ns_init_one_object");
+ ACPI_FUNCTION_NAME(ns_init_one_object);
info->object_count++;
@@ -311,6 +331,72 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_find_ini_methods
+ *
+ * PARAMETERS: acpi_walk_callback
+ *
+ * RETURN: acpi_status
+ *
+ * DESCRIPTION: Called during namespace walk. Finds objects named _INI under
+ * device/processor/thermal objects, and marks the entire subtree
+ * with a SUBTREE_HAS_INI flag. This flag is used during the
+ * subsequent device initialization walk to avoid entire subtrees
+ * that do not contain an _INI.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_find_ini_methods(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ struct acpi_device_walk_info *info =
+ ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+ struct acpi_namespace_node *node;
+ struct acpi_namespace_node *parent_node;
+
+ /* Keep count of device/processor/thermal objects */
+
+ node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
+ if ((node->type == ACPI_TYPE_DEVICE) ||
+ (node->type == ACPI_TYPE_PROCESSOR) ||
+ (node->type == ACPI_TYPE_THERMAL)) {
+ info->device_count++;
+ return (AE_OK);
+ }
+
+ /* We are only looking for methods named _INI */
+
+ if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+ return (AE_OK);
+ }
+
+ /*
+ * The only _INI methods that we care about are those that are
+ * present under Device, Processor, and Thermal objects.
+ */
+ parent_node = acpi_ns_get_parent_node(node);
+ switch (parent_node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+
+ /* Mark parent and bubble up the INI present flag to the root */
+
+ while (parent_node) {
+ parent_node->flags |= ANOBJ_SUBTREE_HAS_INI;
+ parent_node = acpi_ns_get_parent_node(parent_node);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return (AE_OK);
+}
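A toy, standalone illustration (not part of the patch) of the flag bubbling performed by acpi_ns_find_ini_methods() above: once an _INI method is found under a device, that device and every ancestor up to the root are tagged, so the subsequent initialization walk can prune any untagged subtree. The node layout and names below are assumptions for illustration only.

#include <stdio.h>

#define SUBTREE_HAS_INI 0x01

struct node {
	const char *name;
	struct node *parent;
	unsigned int flags;
};

static void mark_subtree_has_ini(struct node *parent)
{
	/* Tag the parent and bubble the flag up to (and including) the root */
	while (parent) {
		parent->flags |= SUBTREE_HAS_INI;
		parent = parent->parent;
	}
}

int main(void)
{
	struct node root = { "\\",   NULL,  0 };
	struct node sb   = { "_SB",  &root, 0 };
	struct node dev  = { "PCI0", &sb,   0 };

	/* Pretend the analysis walk just found PCI0._INI */
	mark_subtree_has_ini(&dev);

	printf("%s=%x %s=%x %s=%x\n", root.name, root.flags,
	       sb.name, sb.flags, dev.name, dev.flags);
	return 0;
}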
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_init_one_device
*
* PARAMETERS: acpi_walk_callback
@@ -327,119 +413,165 @@ static acpi_status
acpi_ns_init_one_device(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value)
{
- struct acpi_device_walk_info *info =
- (struct acpi_device_walk_info *)context;
- struct acpi_parameter_info pinfo;
+ struct acpi_device_walk_info *walk_info =
+ ACPI_CAST_PTR(struct acpi_device_walk_info, context);
+ struct acpi_evaluate_info *info = walk_info->evaluate_info;
u32 flags;
acpi_status status;
- struct acpi_namespace_node *ini_node;
struct acpi_namespace_node *device_node;
- ACPI_FUNCTION_TRACE("ns_init_one_device");
+ ACPI_FUNCTION_TRACE(ns_init_one_device);
- device_node = acpi_ns_map_handle_to_node(obj_handle);
- if (!device_node) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ /* We are interested in Devices, Processors and thermal_zones only */
- /*
- * We will run _STA/_INI on Devices, Processors and thermal_zones only
- */
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
if ((device_node->type != ACPI_TYPE_DEVICE) &&
(device_node->type != ACPI_TYPE_PROCESSOR) &&
(device_node->type != ACPI_TYPE_THERMAL)) {
return_ACPI_STATUS(AE_OK);
}
- if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
- (!(acpi_dbg_level & ACPI_LV_INFO))) {
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
- }
-
- info->device_count++;
-
/*
- * Check if the _INI method exists for this device -
- * if _INI does not exist, there is no need to run _STA
- * No _INI means device requires no initialization
+ * Because of an earlier namespace analysis, all subtrees that contain an
+ * _INI method are tagged.
+ *
+ * If this device subtree does not contain any _INI methods, we
+ * can exit now and stop traversing this entire subtree.
*/
- status = acpi_ns_search_node(*ACPI_CAST_PTR(u32, METHOD_NAME__INI),
- device_node, ACPI_TYPE_METHOD, &ini_node);
- if (ACPI_FAILURE(status)) {
- /* No _INI method found - move on to next device */
-
- return_ACPI_STATUS(AE_OK);
+ if (!(device_node->flags & ANOBJ_SUBTREE_HAS_INI)) {
+ return_ACPI_STATUS(AE_CTRL_DEPTH);
}
/*
- * Run _STA to determine if we can run _INI on the device -
- * the device must be present before _INI can be run.
- * However, _STA is not required - assume device present if no _STA
+ * Run _STA to determine if this device is present and functioning. We
+ * must know this information for two important reasons (from ACPI spec):
+ *
+ * 1) We can only run _INI if the device is present.
+ * 2) We must abort the device tree walk on this subtree if the device is
+ * not present and is not functional (we will not examine the children)
+ *
+ * The _STA method is not required to be present under the device, we
+ * assume the device is present if _STA does not exist.
*/
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_METHOD,
- device_node,
- METHOD_NAME__STA));
-
- pinfo.node = device_node;
- pinfo.parameters = NULL;
- pinfo.parameter_type = ACPI_PARAM_ARGS;
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node, METHOD_NAME__STA));
- status = acpi_ut_execute_STA(pinfo.node, &flags);
+ status = acpi_ut_execute_STA(device_node, &flags);
if (ACPI_FAILURE(status)) {
+
/* Ignore error and move on to next device */
return_ACPI_STATUS(AE_OK);
}
+ /*
+ * Flags == -1 means that _STA was not found. In this case, we assume that
+ * the device is both present and functional.
+ *
+ * From the ACPI spec, description of _STA:
+ *
+ * "If a device object (including the processor object) does not have an
+ * _STA object, then OSPM assumes that all of the above bits are set (in
+ * other words, the device is present, ..., and functioning)"
+ */
if (flags != ACPI_UINT32_MAX) {
- info->num_STA++;
+ walk_info->num_STA++;
}
+ /*
+ * Examine the PRESENT and FUNCTIONING status bits
+ *
+ * Note: ACPI spec does not seem to specify behavior for the present but
+ * not functioning case, so we assume functioning if present.
+ */
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
- /* Don't look at children of a not present device */
- return_ACPI_STATUS(AE_CTRL_DEPTH);
+ /* Device is not present, we must examine the Functioning bit */
+
+ if (flags & ACPI_STA_DEVICE_FUNCTIONING) {
+ /*
+ * Device is not present but is "functioning". In this case,
+ * we will not run _INI, but we continue to examine the children
+ * of this device.
+ *
+ * From the ACPI spec, description of _STA: (Note - no mention
+ * of whether to run _INI or not on the device in question)
+ *
+ * "_STA may return bit 0 clear (not present) with bit 3 set
+ * (device is functional). This case is used to indicate a valid
+ * device for which no device driver should be loaded (for example,
+ * a bridge device.) Children of this device may be present and
+ * valid. OSPM should continue enumeration below a device whose
+ * _STA returns this bit combination"
+ */
+ return_ACPI_STATUS(AE_OK);
+ } else {
+ /*
+ * Device is not present and is not functioning. We must abort the
+ * walk of this subtree immediately -- don't look at the children
+ * of such a device.
+ *
+ * From the ACPI spec, description of _INI:
+ *
+ * "If the _STA method indicates that the device is not present,
+ * OSPM will not run the _INI and will not examine the children
+ * of the device for _INI methods"
+ */
+ return_ACPI_STATUS(AE_CTRL_DEPTH);
+ }
}
/*
- * The device is present and _INI exists. Run the _INI method.
- * (We already have the _INI node from above)
+ * The device is present or is assumed present if no _STA exists.
+ * Run the _INI if it exists (not required to exist)
+ *
+ * Note: We know there is an _INI within this subtree, but it may not be
+ * under this particular device, it may be lower in the branch.
*/
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_METHOD,
- pinfo.node,
- METHOD_NAME__INI));
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
+
+ info->prefix_node = device_node;
+ info->pathname = METHOD_NAME__INI;
+ info->parameters = NULL;
+ info->parameter_type = ACPI_PARAM_ARGS;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ if (ACPI_SUCCESS(status)) {
+ walk_info->num_INI++;
+
+ if ((acpi_dbg_level <= ACPI_LV_ALL_EXCEPTIONS) &&
+ (!(acpi_dbg_level & ACPI_LV_INFO))) {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "."));
+ }
+ }
+#ifdef ACPI_DEBUG_OUTPUT
+ else if (status != AE_NOT_FOUND) {
- pinfo.node = ini_node;
- status = acpi_ns_evaluate_by_handle(&pinfo);
- if (ACPI_FAILURE(status)) {
/* Ignore error and move on to next device */
-#ifdef ACPI_DEBUG_OUTPUT
- char *scope_name = acpi_ns_get_external_pathname(ini_node);
-
- ACPI_WARNING((AE_INFO, "%s._INI failed: %s",
- scope_name, acpi_format_exception(status)));
+ char *scope_name =
+ acpi_ns_get_external_pathname(info->resolved_node);
- ACPI_MEM_FREE(scope_name);
+ ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
+ scope_name));
+ ACPI_FREE(scope_name);
+ }
#endif
- } else {
- /* Delete any return object (especially if implicit_return is enabled) */
- if (pinfo.return_object) {
- acpi_ut_remove_reference(pinfo.return_object);
- }
+ /* Ignore errors from above */
- /* Count of successful INIs */
-
- info->num_INI++;
- }
+ status = AE_OK;
+ /*
+ * The _INI method has been run if present; call the Global Initialization
+ * Handler for this device.
+ */
if (acpi_gbl_init_handler) {
- /* External initialization handler is present, call it */
-
status =
- acpi_gbl_init_handler(pinfo.node, ACPI_INIT_DEVICE_INI);
+ acpi_gbl_init_handler(device_node, ACPI_INIT_DEVICE_INI);
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
}
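The _STA handling above reduces to a small decision table; the standalone sketch below (not part of the patch, using simplified names and the spec's bit assignments of bit 0 = present, bit 3 = functioning) shows how the status bits select between running _INI, descending without running _INI, and pruning the whole subtree.

#include <stdio.h>

#define STA_PRESENT     0x01    /* _STA bit 0 */
#define STA_FUNCTIONING 0x08    /* _STA bit 3 */

enum walk_action { RUN_INI_AND_DESCEND, SKIP_INI_BUT_DESCEND, PRUNE_SUBTREE };

/*
 * sta_found == 0 models a device with no _STA method at all, which the
 * spec says must be treated as present and functioning
 */
static enum walk_action decide(int sta_found, unsigned int flags)
{
	if (!sta_found || (flags & STA_PRESENT)) {
		return RUN_INI_AND_DESCEND;
	}
	if (flags & STA_FUNCTIONING) {
		/* e.g. a bridge: no driver needed, but children may be valid */
		return SKIP_INI_BUT_DESCEND;
	}
	return PRUNE_SUBTREE;                   /* AE_CTRL_DEPTH in the real code */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       decide(0, 0),                             /* no _STA        */
	       decide(1, STA_PRESENT | STA_FUNCTIONING), /* normal device  */
	       decide(1, STA_FUNCTIONING),               /* bridge-like    */
	       decide(1, 0));                            /* absent device  */
	return 0;
}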
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index 4e0b0524c18..fe75d888e18 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -77,13 +77,14 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_load_table");
+ ACPI_FUNCTION_TRACE(ns_load_table);
/* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */
if (!
(acpi_gbl_table_data[table_desc->type].
flags & ACPI_TABLE_EXECUTABLE)) {
+
/* Just ignore this table */
return_ACPI_STATUS(AE_OK);
@@ -168,7 +169,7 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
acpi_status status;
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("ns_load_table_by_type");
+ ACPI_FUNCTION_TRACE(ns_load_table_by_type);
status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
if (ACPI_FAILURE(status)) {
@@ -180,11 +181,11 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
* DSDT (one), SSDT/PSDT (multiple)
*/
switch (table_type) {
- case ACPI_TABLE_DSDT:
+ case ACPI_TABLE_ID_DSDT:
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));
- table_desc = acpi_gbl_table_lists[ACPI_TABLE_DSDT].next;
+ table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;
/* If table already loaded into namespace, just return */
@@ -200,8 +201,8 @@ static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
}
break;
- case ACPI_TABLE_SSDT:
- case ACPI_TABLE_PSDT:
+ case ACPI_TABLE_ID_SSDT:
+ case ACPI_TABLE_ID_PSDT:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Namespace load: %d SSDT or PSDTs\n",
@@ -258,7 +259,7 @@ acpi_status acpi_ns_load_namespace(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_load_name_space");
+ ACPI_FUNCTION_TRACE(acpi_load_name_space);
/* There must be at least a DSDT installed */
@@ -271,15 +272,15 @@ acpi_status acpi_ns_load_namespace(void)
* Load the namespace. The DSDT is required,
* but the SSDT and PSDT tables are optional.
*/
- status = acpi_ns_load_table_by_type(ACPI_TABLE_DSDT);
+ status = acpi_ns_load_table_by_type(ACPI_TABLE_ID_DSDT);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Ignore exceptions from these */
- (void)acpi_ns_load_table_by_type(ACPI_TABLE_SSDT);
- (void)acpi_ns_load_table_by_type(ACPI_TABLE_PSDT);
+ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_SSDT);
+ (void)acpi_ns_load_table_by_type(ACPI_TABLE_ID_PSDT);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"ACPI Namespace successfully loaded at root %p\n",
@@ -314,7 +315,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
acpi_handle dummy;
u32 level;
- ACPI_FUNCTION_TRACE("ns_delete_subtree");
+ ACPI_FUNCTION_TRACE(ns_delete_subtree);
parent_handle = start_handle;
child_handle = NULL;
@@ -325,6 +326,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
* to where we started.
*/
while (level > 0) {
+
/* Attempt to get the next object in this scope */
status = acpi_get_next_object(ACPI_TYPE_ANY, parent_handle,
@@ -335,6 +337,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
/* Did we get a new object? */
if (ACPI_SUCCESS(status)) {
+
/* Check if this object has any children */
if (ACPI_SUCCESS
@@ -392,7 +395,7 @@ acpi_status acpi_ns_unload_namespace(acpi_handle handle)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_unload_name_space");
+ ACPI_FUNCTION_TRACE(ns_unload_name_space);
/* Parameter validation */
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 639f653b4b6..97b8332c974 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -48,11 +48,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
-/* Local prototypes */
-static void
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
- acpi_size size, char *name_buffer);
-
/*******************************************************************************
*
* FUNCTION: acpi_ns_build_external_path
@@ -67,8 +62,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
* DESCRIPTION: Generate a full pathname
*
******************************************************************************/
-
-static void
+void
acpi_ns_build_external_path(struct acpi_namespace_node *node,
acpi_size size, char *name_buffer)
{
@@ -138,7 +132,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
char *name_buffer;
acpi_size size;
- ACPI_FUNCTION_TRACE_PTR("ns_get_external_pathname", node);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
/* Calculate required buffer size based on depth below root */
@@ -146,7 +140,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
/* Allocate a buffer to be returned to caller */
- name_buffer = ACPI_MEM_CALLOCATE(size);
+ name_buffer = ACPI_ALLOCATE_ZEROED(size);
if (!name_buffer) {
ACPI_ERROR((AE_INFO, "Allocation failure"));
return_PTR(NULL);
@@ -219,7 +213,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
struct acpi_namespace_node *node;
acpi_size required_size;
- ACPI_FUNCTION_TRACE_PTR("ns_handle_to_pathname", target_handle);
+ ACPI_FUNCTION_TRACE_PTR(ns_handle_to_pathname, target_handle);
node = acpi_ns_map_handle_to_node(target_handle);
if (!node) {
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index 10ae6292bca..aabe8794b90 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -76,19 +76,21 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
union acpi_operand_object *last_obj_desc;
acpi_object_type object_type = ACPI_TYPE_ANY;
- ACPI_FUNCTION_TRACE("ns_attach_object");
+ ACPI_FUNCTION_TRACE(ns_attach_object);
/*
* Parameter validation
*/
if (!node) {
+
/* Invalid handle */
- ACPI_ERROR((AE_INFO, "Null named_obj handle"));
+ ACPI_ERROR((AE_INFO, "Null NamedObj handle"));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (!object && (ACPI_TYPE_ANY != type)) {
+
/* Null object */
ACPI_ERROR((AE_INFO,
@@ -97,6 +99,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
}
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+
/* Not a name handle */
ACPI_ERROR((AE_INFO, "Invalid handle %p [%s]",
@@ -108,7 +111,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
if (node->object == object) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Obj %p already installed in name_obj %p\n",
+ "Obj %p already installed in NameObj %p\n",
object, node));
return_ACPI_STATUS(AE_OK);
@@ -201,7 +204,7 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
{
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ns_detach_object");
+ ACPI_FUNCTION_TRACE(ns_detach_object);
obj_desc = node->object;
@@ -252,7 +255,7 @@ union acpi_operand_object *acpi_ns_get_attached_object(struct
acpi_namespace_node
*node)
{
- ACPI_FUNCTION_TRACE_PTR("ns_get_attached_object", node);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_attached_object, node);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node ptr"));
@@ -287,7 +290,7 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
acpi_operand_object
*obj_desc)
{
- ACPI_FUNCTION_TRACE_PTR("ns_get_secondary_object", obj_desc);
+ ACPI_FUNCTION_TRACE_PTR(ns_get_secondary_object, obj_desc);
if ((!obj_desc) ||
(ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_DATA) ||
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index 232be430365..155505a4ef6 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -62,13 +62,13 @@ ACPI_MODULE_NAME("nsparse")
*
******************************************************************************/
acpi_status
-acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc * table_desc)
+acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
{
union acpi_parse_object *parse_root;
acpi_status status;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ns_one_complete_parse");
+ ACPI_FUNCTION_TRACE(ns_one_complete_parse);
/* Create and init a Root Node */
@@ -124,7 +124,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_parse_table");
+ ACPI_FUNCTION_TRACE(ns_parse_table);
/*
* AML Parse, pass 1
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index d64b78952f2..500e2bbcfaf 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -56,16 +56,16 @@ acpi_ns_search_parent_tree(u32 target_name,
/*******************************************************************************
*
- * FUNCTION: acpi_ns_search_node
+ * FUNCTION: acpi_ns_search_one_scope
*
* PARAMETERS: target_name - Ascii ACPI name to search for
- * Node - Starting node where search will begin
+ * parent_node - Starting node where search will begin
* Type - Object type to match
* return_node - Where the matched Named obj is returned
*
* RETURN: Status
*
- * DESCRIPTION: Search a single level of the namespace. Performs a
+ * DESCRIPTION: Search a single level of the namespace. Performs a
* simple search of the specified level, and does not add
* entries or search parents.
*
@@ -75,35 +75,40 @@ acpi_ns_search_parent_tree(u32 target_name,
*
* All namespace searching is linear in this implementation, but
* could be easily modified to support any improved search
- * algorithm. However, the linear search was chosen for simplicity
+ * algorithm. However, the linear search was chosen for simplicity
* and because the trees are small and the other interpreter
* execution overhead is relatively high.
*
+ * Note: CPU execution analysis has shown that the AML interpreter spends
+ * a very small percentage of its time searching the namespace. Therefore,
+ * the linear search seems to be sufficient, as there would seem to be
+ * little value in improving the search.
+ *
******************************************************************************/
acpi_status
-acpi_ns_search_node(u32 target_name,
- struct acpi_namespace_node *node,
- acpi_object_type type,
- struct acpi_namespace_node **return_node)
+acpi_ns_search_one_scope(u32 target_name,
+ struct acpi_namespace_node *parent_node,
+ acpi_object_type type,
+ struct acpi_namespace_node **return_node)
{
- struct acpi_namespace_node *next_node;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("ns_search_node");
+ ACPI_FUNCTION_TRACE(ns_search_one_scope);
#ifdef ACPI_DEBUG_OUTPUT
if (ACPI_LV_NAMES & acpi_dbg_level) {
char *scope_name;
- scope_name = acpi_ns_get_external_pathname(node);
+ scope_name = acpi_ns_get_external_pathname(parent_node);
if (scope_name) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Searching %s (%p) For [%4.4s] (%s)\n",
- scope_name, node, ACPI_CAST_PTR(char,
- &target_name),
+ scope_name, parent_node,
+ ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(type)));
- ACPI_MEM_FREE(scope_name);
+ ACPI_FREE(scope_name);
}
}
#endif
@@ -112,32 +117,33 @@ acpi_ns_search_node(u32 target_name,
* Search for name at this namespace level, which is to say that we
* must search for the name among the children of this object
*/
- next_node = node->child;
- while (next_node) {
+ node = parent_node->child;
+ while (node) {
+
/* Check for match against the name */
- if (next_node->name.integer == target_name) {
+ if (node->name.integer == target_name) {
+
/* Resolve a control method alias if any */
- if (acpi_ns_get_type(next_node) ==
+ if (acpi_ns_get_type(node) ==
ACPI_TYPE_LOCAL_METHOD_ALIAS) {
- next_node =
+ node =
ACPI_CAST_PTR(struct acpi_namespace_node,
- next_node->object);
+ node->object);
}
- /*
- * Found matching entry.
- */
+ /* Found matching entry */
+
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
ACPI_CAST_PTR(char, &target_name),
- acpi_ut_get_type_name(next_node->
- type),
- next_node,
- acpi_ut_get_node_name(node), node));
+ acpi_ut_get_type_name(node->type),
+ node,
+ acpi_ut_get_node_name(parent_node),
+ parent_node));
- *return_node = next_node;
+ *return_node = node;
return_ACPI_STATUS(AE_OK);
}
@@ -145,7 +151,8 @@ acpi_ns_search_node(u32 target_name,
* The last entry in the list points back to the parent,
* so a flag is used to indicate the end-of-list
*/
- if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
+ if (node->flags & ANOBJ_END_OF_PEER_LIST) {
+
/* Searched entire list, we are done */
break;
@@ -153,7 +160,7 @@ acpi_ns_search_node(u32 target_name,
/* Didn't match name, move on to the next peer object */
- next_node = next_node->peer;
+ node = node->peer;
}
/* Searched entire namespace level, not found */
@@ -162,7 +169,8 @@ acpi_ns_search_node(u32 target_name,
"Name [%4.4s] (%s) not found in search in scope [%4.4s] %p first child %p\n",
ACPI_CAST_PTR(char, &target_name),
acpi_ut_get_type_name(type),
- acpi_ut_get_node_name(node), node, node->child));
+ acpi_ut_get_node_name(parent_node), parent_node,
+ parent_node->child));
return_ACPI_STATUS(AE_NOT_FOUND);
}
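For reference, a self-contained toy version (not part of the patch) of the single-level linear search above: the children of a scope form a peer list whose last entry carries an end-of-list flag, and each 4-character name is compared directly. The types and the string comparison are simplified assumptions (the real code compares the packed 32-bit name).

#include <stdio.h>
#include <string.h>

#define END_OF_PEER_LIST 0x01

struct toy_node {
	char name[5];
	unsigned int flags;
	struct toy_node *peer;          /* next sibling in this scope */
};

static struct toy_node *search_one_scope(struct toy_node *first_child,
					 const char *target)
{
	struct toy_node *node = first_child;

	while (node) {
		if (strncmp(node->name, target, 4) == 0) {
			return node;            /* found in this scope */
		}
		if (node->flags & END_OF_PEER_LIST) {
			break;                  /* searched the entire list */
		}
		node = node->peer;              /* try the next peer */
	}
	return NULL;                            /* AE_NOT_FOUND */
}

int main(void)
{
	struct toy_node c = { "GPE_", END_OF_PEER_LIST, NULL };
	struct toy_node b = { "PCI0", 0, &c };
	struct toy_node a = { "CPU0", 0, &b };

	struct toy_node *hit = search_one_scope(&a, "PCI0");
	printf("%s\n", hit ? hit->name : "not found");
	return 0;
}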
@@ -179,14 +187,14 @@ acpi_ns_search_node(u32 target_name,
* RETURN: Status
*
* DESCRIPTION: Called when a name has not been found in the current namespace
- * level. Before adding it or giving up, ACPI scope rules require
+ * level. Before adding it or giving up, ACPI scope rules require
* searching enclosing scopes in cases identified by acpi_ns_local().
*
* "A name is located by finding the matching name in the current
* name space, and then in the parent name space. If the parent
* name space does not contain the name, the search continues
* recursively until either the name is found or the name space
- * does not have a parent (the root of the name space). This
+ * does not have a parent (the root of the name space). This
* indicates that the name is not found" (From ACPI Specification,
* section 5.3)
*
@@ -201,7 +209,7 @@ acpi_ns_search_parent_tree(u32 target_name,
acpi_status status;
struct acpi_namespace_node *parent_node;
- ACPI_FUNCTION_TRACE("ns_search_parent_tree");
+ ACPI_FUNCTION_TRACE(ns_search_parent_tree);
parent_node = acpi_ns_get_parent_node(node);
@@ -235,20 +243,19 @@ acpi_ns_search_parent_tree(u32 target_name,
*/
while (parent_node) {
/*
- * Search parent scope. Use TYPE_ANY because we don't care about the
+ * Search parent scope. Use TYPE_ANY because we don't care about the
* object type at this point, we only care about the existence of
- * the actual name we are searching for. Typechecking comes later.
+ * the actual name we are searching for. Typechecking comes later.
*/
- status = acpi_ns_search_node(target_name, parent_node,
+ status =
+ acpi_ns_search_one_scope(target_name, parent_node,
ACPI_TYPE_ANY, return_node);
if (ACPI_SUCCESS(status)) {
return_ACPI_STATUS(status);
}
- /*
- * Not found here, go up another level
- * (until we reach the root)
- */
+ /* Not found here, go up another level (until we reach the root) */
+
parent_node = acpi_ns_get_parent_node(parent_node);
}
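A minimal sketch (not part of the patch) of the upward search the loop above performs: when a name is not found in the current scope, each enclosing scope is tried in turn until the root has been searched, per the ACPI scope rules quoted in the function header. Each scope below holds a single name purely for illustration.

#include <stdio.h>
#include <string.h>

struct scope {
	const char *defined_name;       /* the one name defined at this level */
	struct scope *parent;
};

static struct scope *search_parent_tree(struct scope *start, const char *target)
{
	struct scope *s = start->parent;        /* name was not found locally */

	while (s) {
		if (s->defined_name && strcmp(s->defined_name, target) == 0) {
			return s;
		}
		s = s->parent;                  /* not found here, go up a level */
	}
	return NULL;                            /* AE_NOT_FOUND */
}

int main(void)
{
	struct scope root  = { "_OSI", NULL };
	struct scope sb    = { NULL, &root };
	struct scope inner = { NULL, &sb };

	printf("%s\n", search_parent_tree(&inner, "_OSI")
	       ? "found in an enclosing scope" : "not found");
	return 0;
}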
@@ -273,7 +280,7 @@ acpi_ns_search_parent_tree(u32 target_name,
* RETURN: Status
*
* DESCRIPTION: Search for a name segment in a single namespace level,
- * optionally adding it if it is not found. If the passed
+ * optionally adding it if it is not found. If the passed
* Type is not Any and the type previously stored in the
* entry was Any (i.e. unknown), update the stored type.
*
@@ -293,29 +300,46 @@ acpi_ns_search_and_enter(u32 target_name,
acpi_status status;
struct acpi_namespace_node *new_node;
- ACPI_FUNCTION_TRACE("ns_search_and_enter");
+ ACPI_FUNCTION_TRACE(ns_search_and_enter);
/* Parameter validation */
if (!node || !target_name || !return_node) {
ACPI_ERROR((AE_INFO,
- "Null param: Node %p Name %X return_node %p",
+ "Null parameter: Node %p Name %X ReturnNode %p",
node, target_name, return_node));
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Name must consist of printable characters */
-
+ /*
+ * Name must consist of valid ACPI characters. We will repair the name if
+ * necessary because we don't want to abort because of this, but we want
+ * all namespace names to be printable. A warning message is appropriate.
+ *
+ * This issue came up because there are in fact machines that exhibit
+ * this problem, and we want to be able to enable ACPI support for them,
+ * even though there are a few bad names.
+ */
if (!acpi_ut_valid_acpi_name(target_name)) {
- ACPI_ERROR((AE_INFO, "Bad character in ACPI Name: %X",
- target_name));
- return_ACPI_STATUS(AE_BAD_CHARACTER);
+ target_name = acpi_ut_repair_name(target_name);
+
+ /* Report warning only if in strict mode or debug mode */
+
+ if (!acpi_gbl_enable_interpreter_slack) {
+ ACPI_WARNING((AE_INFO,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_WARN,
+ "Found bad character(s) in name, repaired: [%4.4s]\n",
+ ACPI_CAST_PTR(char, &target_name)));
+ }
}
/* Try to find the name in the namespace level specified by the caller */
*return_node = ACPI_ENTRY_NOT_FOUND;
- status = acpi_ns_search_node(target_name, node, type, return_node);
+ status = acpi_ns_search_one_scope(target_name, node, type, return_node);
if (status != AE_NOT_FOUND) {
/*
* If we found it AND the request specifies that a find is an error,
@@ -325,18 +349,16 @@ acpi_ns_search_and_enter(u32 target_name,
status = AE_ALREADY_EXISTS;
}
- /*
- * Either found it or there was an error
- * -- finished either way
- */
+ /* Either found it or there was an error: finished either way */
+
return_ACPI_STATUS(status);
}
/*
- * The name was not found. If we are NOT performing the first pass
+ * The name was not found. If we are NOT performing the first pass
* (name entry) of loading the namespace, search the parent tree (all the
* way to the root if necessary.) We don't want to perform the parent
- * search when the namespace is actually being loaded. We want to perform
+ * search when the namespace is actually being loaded. We want to perform
* the search when namespace references are being resolved (load pass 2)
* and during the execution phase.
*/
@@ -354,9 +376,8 @@ acpi_ns_search_and_enter(u32 target_name,
}
}
- /*
- * In execute mode, just search, never add names. Exit now.
- */
+ /* In execute mode, just search, never add names. Exit now */
+
if (interpreter_mode == ACPI_IMODE_EXECUTE) {
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"%4.4s Not found in %p [Not adding]\n",
@@ -371,11 +392,18 @@ acpi_ns_search_and_enter(u32 target_name,
if (!new_node) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
+#ifdef ACPI_ASL_COMPILER
+ /*
+ * Node is an object defined by an External() statement
+ */
+ if (flags & ACPI_NS_EXTERNAL) {
+ new_node->flags |= ANOBJ_IS_EXTERNAL;
+ }
+#endif
/* Install the new object into the parent's list of children */
acpi_ns_install_node(walk_state, node, new_node, type);
*return_node = new_node;
-
return_ACPI_STATUS(AE_OK);
}
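The name-repair path added above in acpi_ns_search_and_enter() tolerates firmware tables with a few malformed names instead of rejecting them outright. Below is a small standalone sketch of what such a repair could look like, replacing invalid characters with '*'; this is an assumption about the behaviour of acpi_ut_repair_name(), not a copy of it.

#include <stdio.h>
#include <ctype.h>

/*
 * An ACPI name segment is exactly four characters, each of which must be
 * 'A'-'Z', '0'-'9' or '_' (the rule that the first character may not be a
 * digit is omitted here for brevity)
 */
static void repair_name(char name[4])
{
	int i;

	for (i = 0; i < 4; i++) {
		if (!(isupper((unsigned char)name[i]) ||
		      isdigit((unsigned char)name[i]) ||
		      name[i] == '_')) {
			name[i] = '*';          /* replace the bad character */
		}
	}
}

int main(void)
{
	char name[4] = { 'A', 'b', '1', '!' };  /* 'b' and '!' are invalid */

	repair_name(name);
	printf("%.4s\n", name);                 /* prints A*1* */
	return 0;
}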
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index 3e7cad549a3..aa4e799d9a8 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -78,15 +78,17 @@ acpi_ns_report_error(char *module_name,
char *internal_name, acpi_status lookup_status)
{
acpi_status status;
+ u32 bad_name;
char *name = NULL;
- acpi_ut_report_error(module_name, line_number);
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
if (lookup_status == AE_BAD_CHARACTER) {
+
/* There is a non-ascii character in the name */
- acpi_os_printf("[0x%4.4X] (NON-ASCII)",
- *(ACPI_CAST_PTR(u32, internal_name)));
+ ACPI_MOVE_32_TO_32(&bad_name, internal_name);
+ acpi_os_printf("[0x%4.4X] (NON-ASCII)", bad_name);
} else {
/* Convert path to external format */
@@ -102,7 +104,7 @@ acpi_ns_report_error(char *module_name,
}
if (name) {
- ACPI_MEM_FREE(name);
+ ACPI_FREE(name);
}
}
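Sketch (not part of the patch) of why the error path above copies the internal name into a local u32 before printing: the name pointer may not be 4-byte aligned, and the old direct 32-bit dereference could fault on strict-alignment architectures. memcpy stands in for the ACPI_MOVE_32_TO_32 macro here.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t load_name(const char *internal_name)
{
	uint32_t bad_name;

	/* Byte-wise copy: safe regardless of the pointer's alignment */
	memcpy(&bad_name, internal_name, sizeof(bad_name));
	return bad_name;
}

int main(void)
{
	char buffer[8] = "xA___";               /* name starts at an odd offset */

	printf("[0x%4.4X]\n", (unsigned int)load_name(buffer + 1));
	return 0;
}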
@@ -137,11 +139,12 @@ acpi_ns_report_method_error(char *module_name,
acpi_status status;
struct acpi_namespace_node *node = prefix_node;
- acpi_ut_report_error(module_name, line_number);
+ acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
if (path) {
- status = acpi_ns_get_node_by_path(path, prefix_node,
- ACPI_NS_NO_UPSEARCH, &node);
+ status =
+ acpi_ns_get_node(prefix_node, path, ACPI_NS_NO_UPSEARCH,
+ &node);
if (ACPI_FAILURE(status)) {
acpi_os_printf("[Could not get node by pathname]");
}
@@ -185,7 +188,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node, char *message)
}
acpi_os_printf("[%s] (Node %p)", (char *)buffer.pointer, node);
- ACPI_MEM_FREE(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
}
}
@@ -239,7 +242,7 @@ static u8 acpi_ns_valid_path_separator(char sep)
acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
{
- ACPI_FUNCTION_TRACE("ns_get_type");
+ ACPI_FUNCTION_TRACE(ns_get_type);
if (!node) {
ACPI_WARNING((AE_INFO, "Null Node parameter"));
@@ -264,9 +267,10 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
u32 acpi_ns_local(acpi_object_type type)
{
- ACPI_FUNCTION_TRACE("ns_local");
+ ACPI_FUNCTION_TRACE(ns_local);
if (!acpi_ut_valid_object_type(type)) {
+
/* Type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
@@ -363,7 +367,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
char *result = NULL;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ns_build_internal_name");
+ ACPI_FUNCTION_TRACE(ns_build_internal_name);
/* Setup the correct prefixes, counts, and pointers */
@@ -411,6 +415,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
for (i = 0; i < ACPI_NAME_SIZE; i++) {
if (acpi_ns_valid_path_separator(*external_name) ||
(*external_name == 0)) {
+
/* Pad the segment with underscore(s) if segment is short */
result[i] = '_';
@@ -473,7 +478,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
struct acpi_namestring_info info;
acpi_status status;
- ACPI_FUNCTION_TRACE("ns_internalize_name");
+ ACPI_FUNCTION_TRACE(ns_internalize_name);
if ((!external_name) || (*external_name == 0) || (!converted_name)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -486,7 +491,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
/* We need a segment to store the internal name */
- internal_name = ACPI_MEM_CALLOCATE(info.length);
+ internal_name = ACPI_ALLOCATE_ZEROED(info.length);
if (!internal_name) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -496,7 +501,7 @@ acpi_status acpi_ns_internalize_name(char *external_name, char **converted_name)
info.internal_name = internal_name;
status = acpi_ns_build_internal_name(&info);
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(internal_name);
+ ACPI_FREE(internal_name);
return_ACPI_STATUS(status);
}
@@ -533,7 +538,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
acpi_native_uint i = 0;
acpi_native_uint j = 0;
- ACPI_FUNCTION_TRACE("ns_externalize_name");
+ ACPI_FUNCTION_TRACE(ns_externalize_name);
if (!internal_name_length || !internal_name || !converted_name) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -628,7 +633,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
/*
* Build converted_name
*/
- *converted_name = ACPI_MEM_CALLOCATE(required_length);
+ *converted_name = ACPI_ALLOCATE_ZEROED(required_length);
if (!(*converted_name)) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -681,13 +686,9 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
ACPI_FUNCTION_ENTRY();
/*
- * Simple implementation.
+ * Simple implementation
*/
- if (!handle) {
- return (NULL);
- }
-
- if (handle == ACPI_ROOT_OBJECT) {
+ if ((!handle) || (handle == ACPI_ROOT_OBJECT)) {
return (acpi_gbl_root_node);
}
@@ -697,7 +698,7 @@ struct acpi_namespace_node *acpi_ns_map_handle_to_node(acpi_handle handle)
return (NULL);
}
- return ((struct acpi_namespace_node *)handle);
+ return (ACPI_CAST_PTR(struct acpi_namespace_node, handle));
}
/*******************************************************************************
@@ -752,7 +753,7 @@ void acpi_ns_terminate(void)
{
union acpi_operand_object *obj_desc;
- ACPI_FUNCTION_TRACE("ns_terminate");
+ ACPI_FUNCTION_TRACE(ns_terminate);
/*
* 1) Free the entire namespace -- all nodes and objects
@@ -792,9 +793,10 @@ void acpi_ns_terminate(void)
u32 acpi_ns_opens_scope(acpi_object_type type)
{
- ACPI_FUNCTION_TRACE_STR("ns_opens_scope", acpi_ut_get_type_name(type));
+ ACPI_FUNCTION_TRACE_STR(ns_opens_scope, acpi_ut_get_type_name(type));
if (!acpi_ut_valid_object_type(type)) {
+
/* type code out of range */
ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type));
@@ -806,12 +808,12 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
/*******************************************************************************
*
- * FUNCTION: acpi_ns_get_node_by_path
+ * FUNCTION: acpi_ns_get_node
*
* PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
- * start_node - Root of subtree to be searched, or NS_ALL for the
+ * prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
@@ -827,23 +829,29 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
******************************************************************************/
acpi_status
-acpi_ns_get_node_by_path(char *pathname,
- struct acpi_namespace_node *start_node,
- u32 flags, struct acpi_namespace_node **return_node)
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+ char *pathname,
+ u32 flags, struct acpi_namespace_node **return_node)
{
union acpi_generic_state scope_info;
acpi_status status;
- char *internal_path = NULL;
-
- ACPI_FUNCTION_TRACE_PTR("ns_get_node_by_path", pathname);
+ char *internal_path;
- if (pathname) {
- /* Convert path to internal representation */
+ ACPI_FUNCTION_TRACE_PTR(ns_get_node, pathname);
- status = acpi_ns_internalize_name(pathname, &internal_path);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (!pathname) {
+ *return_node = prefix_node;
+ if (!prefix_node) {
+ *return_node = acpi_gbl_root_node;
}
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Convert path to internal representation */
+
+ status = acpi_ns_internalize_name(pathname, &internal_path);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
}
/* Must lock namespace during lookup */
@@ -855,26 +863,23 @@ acpi_ns_get_node_by_path(char *pathname,
/* Setup lookup scope (search starting point) */
- scope_info.scope.node = start_node;
+ scope_info.scope.node = prefix_node;
/* Lookup the name in the namespace */
- status = acpi_ns_lookup(&scope_info, internal_path,
- ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
- (flags | ACPI_NS_DONT_OPEN_SCOPE),
- NULL, return_node);
+ status = acpi_ns_lookup(&scope_info, internal_path, ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ (flags | ACPI_NS_DONT_OPEN_SCOPE), NULL,
+ return_node);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s, %s\n",
- internal_path,
- acpi_format_exception(status)));
+ pathname, acpi_format_exception(status)));
}
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
cleanup:
- if (internal_path) {
- ACPI_MEM_FREE(internal_path);
- }
+ ACPI_FREE(internal_path);
return_ACPI_STATUS(status);
}
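A self-contained sketch (not part of the patch) of the null-pathname shortcut taken by the new acpi_ns_get_node() above: no pathname means the prefix node itself is the answer, and a missing prefix node defaults to the namespace root. Names and types are illustrative only.

#include <stdio.h>

struct toy_node { const char *name; };

static struct toy_node toy_root = { "\\" };

static struct toy_node *get_node(struct toy_node *prefix, const char *pathname)
{
	if (!pathname) {
		/* No path: the prefix node (or the root) is the result */
		return prefix ? prefix : &toy_root;
	}
	/* The real code internalizes the path and does a locked lookup here */
	return NULL;
}

int main(void)
{
	struct toy_node dev = { "PCI0" };

	printf("%s %s\n",
	       get_node(&dev, NULL)->name,      /* prints PCI0 */
	       get_node(NULL, NULL)->name);     /* prints \    */
	return 0;
}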
@@ -960,9 +965,10 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node * child_node)
{
struct acpi_namespace_node *parent_node;
- ACPI_FUNCTION_TRACE("ns_find_parent_name");
+ ACPI_FUNCTION_TRACE(ns_find_parent_name);
if (child_node) {
+
/* Valid entry. Get the parent Node */
parent_node = acpi_ns_get_parent_node(child_node);
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index fcab1e784b8..c8f6bef16ed 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -76,6 +76,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
ACPI_FUNCTION_ENTRY();
if (!child_node) {
+
/* It's really the parent's _scope_ that we want */
if (parent_node->child) {
@@ -92,6 +93,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
/* If any type is OK, we are done */
if (type == ACPI_TYPE_ANY) {
+
/* next_node is NULL if we are at the end-of-list */
return (next_node);
@@ -100,6 +102,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
/* Must search for the node -- but within this scope only */
while (next_node) {
+
/* If type matches, we are done */
if (next_node->type == type) {
@@ -161,7 +164,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_object_type child_type;
u32 level;
- ACPI_FUNCTION_TRACE("ns_walk_namespace");
+ ACPI_FUNCTION_TRACE(ns_walk_namespace);
/* Special case for the namespace Root Node */
@@ -182,6 +185,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
* bubbled up to (and passed) the original parent handle (start_entry)
*/
while (level > 0) {
+
/* Get the next node in this scope. Null if not found */
status = AE_OK;
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index a95f636dc35..6d9bd45af30 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/acinterp.h>
@@ -51,6 +49,7 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
+#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object_typed
@@ -71,18 +70,17 @@ ACPI_MODULE_NAME("nsxfeval")
* be valid (non-null)
*
******************************************************************************/
-#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
acpi_string pathname,
- struct acpi_object_list *external_params,
- struct acpi_buffer *return_buffer,
+ struct acpi_object_list * external_params,
+ struct acpi_buffer * return_buffer,
acpi_object_type return_type)
{
acpi_status status;
u8 must_free = FALSE;
- ACPI_FUNCTION_TRACE("acpi_evaluate_object_typed");
+ ACPI_FUNCTION_TRACE(acpi_evaluate_object_typed);
/* Return buffer must be valid */
@@ -110,6 +108,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
}
if (return_buffer->length == 0) {
+
/* Error because caller specifically asked for a return value */
ACPI_ERROR((AE_INFO, "No return value"));
@@ -131,6 +130,7 @@ acpi_evaluate_object_typed(acpi_handle handle,
acpi_ut_get_type_name(return_type)));
if (must_free) {
+
/* Caller used ACPI_ALLOCATE_BUFFER, free the return buffer */
acpi_os_free(return_buffer->pointer);
@@ -140,6 +140,8 @@ acpi_evaluate_object_typed(acpi_handle handle,
return_buffer->length = 0;
return_ACPI_STATUS(AE_TYPE);
}
+
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -161,7 +163,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
* be valid (non-null)
*
******************************************************************************/
-
acpi_status
acpi_evaluate_object(acpi_handle handle,
acpi_string pathname,
@@ -170,51 +171,61 @@ acpi_evaluate_object(acpi_handle handle,
{
acpi_status status;
acpi_status status2;
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
acpi_size buffer_space_needed;
u32 i;
- ACPI_FUNCTION_TRACE("acpi_evaluate_object");
+ ACPI_FUNCTION_TRACE(acpi_evaluate_object);
- info.node = handle;
- info.parameters = NULL;
- info.return_object = NULL;
- info.parameter_type = ACPI_PARAM_ARGS;
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->pathname = pathname;
+ info->parameter_type = ACPI_PARAM_ARGS;
+
+ /* Convert and validate the device handle */
+
+ info->prefix_node = acpi_ns_map_handle_to_node(handle);
+ if (!info->prefix_node) {
+ status = AE_BAD_PARAMETER;
+ goto cleanup;
+ }
/*
- * If there are parameters to be passed to the object
- * (which must be a control method), the external objects
- * must be converted to internal objects
+ * If there are parameters to be passed to a control method, the external
+ * objects must all be converted to internal objects
*/
if (external_params && external_params->count) {
/*
* Allocate a new parameter block for the internal objects
* Add 1 to count to allow for null terminated internal list
*/
- info.parameters = ACPI_MEM_CALLOCATE(((acpi_size)
- external_params->count +
- 1) * sizeof(void *));
- if (!info.parameters) {
- return_ACPI_STATUS(AE_NO_MEMORY);
+ info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ external_params->
+ count +
+ 1) * sizeof(void *));
+ if (!info->parameters) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /*
- * Convert each external object in the list to an
- * internal object
- */
+ /* Convert each external object in the list to an internal object */
+
for (i = 0; i < external_params->count; i++) {
status =
acpi_ut_copy_eobject_to_iobject(&external_params->
pointer[i],
- &info.
+ &info->
parameters[i]);
if (ACPI_FAILURE(status)) {
- acpi_ut_delete_internal_object_list(info.
- parameters);
- return_ACPI_STATUS(status);
+ goto cleanup;
}
}
- info.parameters[external_params->count] = NULL;
+ info->parameters[external_params->count] = NULL;
}
/*
@@ -224,43 +235,31 @@ acpi_evaluate_object(acpi_handle handle,
* 3) Valid handle
*/
if ((pathname) && (acpi_ns_valid_root_prefix(pathname[0]))) {
- /*
- * The path is fully qualified, just evaluate by name
- */
- status = acpi_ns_evaluate_by_name(pathname, &info);
+
+ /* The path is fully qualified, just evaluate by name */
+
+ info->prefix_node = NULL;
+ status = acpi_ns_evaluate(info);
} else if (!handle) {
/*
- * A handle is optional iff a fully qualified pathname
- * is specified. Since we've already handled fully
- * qualified names above, this is an error
+ * A handle is optional iff a fully qualified pathname is specified.
+ * Since we've already handled fully qualified names above, this is
+ * an error
*/
if (!pathname) {
- ACPI_ERROR((AE_INFO,
- "Both Handle and Pathname are NULL"));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Both Handle and Pathname are NULL"));
} else {
- ACPI_ERROR((AE_INFO,
- "Handle is NULL and Pathname is relative"));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Null Handle with relative pathname [%s]",
+ pathname));
}
status = AE_BAD_PARAMETER;
} else {
- /*
- * We get here if we have a handle -- and if we have a
- * pathname it is relative. The handle will be validated
- * in the lower procedures
- */
- if (!pathname) {
- /*
- * The null pathname case means the handle is for
- * the actual object to be evaluated
- */
- status = acpi_ns_evaluate_by_handle(&info);
- } else {
- /*
- * Both a Handle and a relative Pathname
- */
- status = acpi_ns_evaluate_relative(pathname, &info);
- }
+ /* We have a namespace node and a possible relative path */
+
+ status = acpi_ns_evaluate(info);
}
/*
@@ -268,10 +267,10 @@ acpi_evaluate_object(acpi_handle handle,
* copy the return value to an external object.
*/
if (return_buffer) {
- if (!info.return_object) {
+ if (!info->return_object) {
return_buffer->length = 0;
} else {
- if (ACPI_GET_DESCRIPTOR_TYPE(info.return_object) ==
+ if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
ACPI_DESC_TYPE_NAMED) {
/*
* If we received a NS Node as a return object, this means that
@@ -282,19 +281,19 @@ acpi_evaluate_object(acpi_handle handle,
* support for various types at a later date if necessary.
*/
status = AE_TYPE;
- info.return_object = NULL; /* No need to delete a NS Node */
+ info->return_object = NULL; /* No need to delete a NS Node */
return_buffer->length = 0;
}
if (ACPI_SUCCESS(status)) {
- /*
- * Find out how large a buffer is needed
- * to contain the returned object
- */
+
+ /* Get the size of the returned object */
+
status =
- acpi_ut_get_object_size(info.return_object,
+ acpi_ut_get_object_size(info->return_object,
&buffer_space_needed);
if (ACPI_SUCCESS(status)) {
+
/* Validate/Allocate/Clear caller buffer */
status =
@@ -303,7 +302,8 @@ acpi_evaluate_object(acpi_handle handle,
buffer_space_needed);
if (ACPI_FAILURE(status)) {
/*
- * Caller's buffer is too small or a new one can't be allocated
+ * Caller's buffer is too small or a new one can't
+ * be allocated
*/
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Needed buffer size %X, %s\n",
@@ -312,12 +312,11 @@ acpi_evaluate_object(acpi_handle handle,
acpi_format_exception
(status)));
} else {
- /*
- * We have enough space for the object, build it
- */
+ /* We have enough space for the object, build it */
+
status =
acpi_ut_copy_iobject_to_eobject
- (info.return_object,
+ (info->return_object,
return_buffer);
}
}
@@ -325,35 +324,37 @@ acpi_evaluate_object(acpi_handle handle,
}
}
- if (info.return_object) {
+ if (info->return_object) {
/*
- * Delete the internal return object. NOTE: Interpreter
- * must be locked to avoid race condition.
+ * Delete the internal return object. NOTE: Interpreter must be
+ * locked to avoid race condition.
*/
status2 = acpi_ex_enter_interpreter();
if (ACPI_SUCCESS(status2)) {
- /*
- * Delete the internal return object. (Or at least
- * decrement the reference count by one)
- */
- acpi_ut_remove_reference(info.return_object);
+
+ /* Remove one reference on the return object (should delete it) */
+
+ acpi_ut_remove_reference(info->return_object);
acpi_ex_exit_interpreter();
}
}
- /*
- * Free the input parameter list (if we created one),
- */
- if (info.parameters) {
+ cleanup:
+
+ /* Free the input parameter list (if we created one) */
+
+ if (info->parameters) {
+
/* Free the allocated parameter block */
- acpi_ut_delete_internal_object_list(info.parameters);
+ acpi_ut_delete_internal_object_list(info->parameters);
}
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_evaluate_object);
+ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
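To make the allocation and cleanup pattern in the rewritten acpi_evaluate_object() easier to follow, here is a standalone sketch (not part of the patch) that uses plain strings in place of ACPI objects: the internal parameter list gets one extra slot for a terminating NULL, and every failure funnels through a single cleanup label, as in the function above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **convert_params(const char *const *external, size_t count)
{
	size_t i;
	/* One extra slot for the terminating NULL, as in the real code */
	char **internal = calloc(count + 1, sizeof(*internal));

	if (!internal) {
		return NULL;
	}
	for (i = 0; i < count; i++) {
		internal[i] = strdup(external[i]);      /* "convert" one object */
		if (!internal[i]) {
			goto cleanup;
		}
	}
	return internal;

cleanup:
	while (i--) {
		free(internal[i]);
	}
	free(internal);
	return NULL;
}

int main(void)
{
	const char *args[] = { "Arg0", "Arg1" };
	char **params = convert_params(args, 2);
	size_t i;

	for (i = 0; params && params[i]; i++) {
		printf("%s\n", params[i]);
		free(params[i]);
	}
	free(params);
	return 0;
}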
/*******************************************************************************
*
@@ -384,7 +385,6 @@ EXPORT_SYMBOL(acpi_evaluate_object);
* function, etc.
*
******************************************************************************/
-
acpi_status
acpi_walk_namespace(acpi_object_type type,
acpi_handle start_object,
@@ -394,7 +394,7 @@ acpi_walk_namespace(acpi_object_type type,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_walk_namespace");
+ ACPI_FUNCTION_TRACE(acpi_walk_namespace);
/* Parameter validation */
@@ -421,7 +421,7 @@ acpi_walk_namespace(acpi_object_type type,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_walk_namespace);
+ACPI_EXPORT_SYMBOL(acpi_walk_namespace)
/*******************************************************************************
*
@@ -436,7 +436,6 @@ EXPORT_SYMBOL(acpi_walk_namespace);
* on that.
*
******************************************************************************/
-
static acpi_status
acpi_ns_get_device_callback(acpi_handle obj_handle,
u32 nesting_level,
@@ -473,6 +472,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
}
if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
+
/* Don't examine children of the device if not present */
return (AE_CTRL_DEPTH);
@@ -489,6 +489,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
}
if (ACPI_STRNCMP(hid.value, info->hid, sizeof(hid.value)) != 0) {
+
/* Get the list of Compatible IDs */
status = acpi_ut_execute_CID(node, &cid);
@@ -505,11 +506,11 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
sizeof(struct
acpi_compatible_id)) !=
0) {
- ACPI_MEM_FREE(cid);
+ ACPI_FREE(cid);
return (AE_OK);
}
}
- ACPI_MEM_FREE(cid);
+ ACPI_FREE(cid);
}
}
@@ -551,7 +552,7 @@ acpi_get_devices(char *HID,
acpi_status status;
struct acpi_get_devices_info info;
- ACPI_FUNCTION_TRACE("acpi_get_devices");
+ ACPI_FUNCTION_TRACE(acpi_get_devices);
/* Parameter validation */
@@ -563,9 +564,9 @@ acpi_get_devices(char *HID,
* We're going to call their callback from OUR callback, so we need
* to know what it is, and their context parameter.
*/
+ info.hid = HID;
info.context = context;
info.user_function = user_function;
- info.hid = HID;
/*
* Lock the namespace around the walk.
@@ -578,9 +579,8 @@ acpi_get_devices(char *HID,
return_ACPI_STATUS(status);
}
- status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE,
- ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK,
+ status = acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
acpi_ns_get_device_callback, &info,
return_value);
@@ -588,7 +588,7 @@ acpi_get_devices(char *HID,
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_devices);
+ACPI_EXPORT_SYMBOL(acpi_get_devices)
/*******************************************************************************
*
@@ -603,7 +603,6 @@ EXPORT_SYMBOL(acpi_get_devices);
* DESCRIPTION: Attach arbitrary data and handler to a namespace node.
*
******************************************************************************/
-
acpi_status
acpi_attach_data(acpi_handle obj_handle,
acpi_object_handler handler, void *data)
@@ -637,6 +636,8 @@ acpi_attach_data(acpi_handle obj_handle,
return (status);
}
+ACPI_EXPORT_SYMBOL(acpi_attach_data)
+
/*******************************************************************************
*
* FUNCTION: acpi_detach_data
@@ -649,7 +650,6 @@ acpi_attach_data(acpi_handle obj_handle,
* DESCRIPTION: Remove data that was previously attached to a node.
*
******************************************************************************/
-
acpi_status
acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
{
@@ -682,6 +682,8 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
return (status);
}
+ACPI_EXPORT_SYMBOL(acpi_detach_data)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_data
@@ -695,7 +697,6 @@ acpi_detach_data(acpi_handle obj_handle, acpi_object_handler handler)
* DESCRIPTION: Retrieve data that was previously attached to a namespace node.
*
******************************************************************************/
-
acpi_status
acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
{
@@ -727,3 +728,5 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return (status);
}
+
+ACPI_EXPORT_SYMBOL(acpi_get_data)
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index 8cd8675a47c..978213a6c19 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@@ -114,9 +112,8 @@ acpi_get_handle(acpi_handle parent,
/*
* Find the Node and convert to a handle
*/
- status =
- acpi_ns_get_node_by_path(pathname, prefix_node, ACPI_NS_NO_UPSEARCH,
- &node);
+ status = acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH,
+ &node);
*ret_handle = NULL;
if (ACPI_SUCCESS(status)) {
@@ -126,7 +123,7 @@ acpi_get_handle(acpi_handle parent,
return (status);
}
-EXPORT_SYMBOL(acpi_get_handle);
+ACPI_EXPORT_SYMBOL(acpi_get_handle)
/******************************************************************************
*
@@ -143,7 +140,6 @@ EXPORT_SYMBOL(acpi_get_handle);
* complementary functions.
*
******************************************************************************/
-
acpi_status
acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
{
@@ -162,6 +158,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
}
if (name_type == ACPI_FULL_PATHNAME) {
+
/* Get the full pathname (From the namespace root) */
status = acpi_ns_handle_to_pathname(handle, buffer);
@@ -203,7 +200,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
return (status);
}
-EXPORT_SYMBOL(acpi_get_name);
+ACPI_EXPORT_SYMBOL(acpi_get_name)
/******************************************************************************
*
@@ -219,7 +216,6 @@ EXPORT_SYMBOL(acpi_get_name);
* control methods (Such as in the case of a device.)
*
******************************************************************************/
-
acpi_status
acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
{
@@ -241,7 +237,7 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
return (status);
}
- info = ACPI_MEM_CALLOCATE(sizeof(struct acpi_device_info));
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_device_info));
if (!info) {
return (AE_NO_MEMORY);
}
@@ -345,11 +341,11 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
}
cleanup:
- ACPI_MEM_FREE(info);
+ ACPI_FREE(info);
if (cid_list) {
- ACPI_MEM_FREE(cid_list);
+ ACPI_FREE(cid_list);
}
return (status);
}
-EXPORT_SYMBOL(acpi_get_object_info);
+ACPI_EXPORT_SYMBOL(acpi_get_object_info)
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index a0332595677..a163e1d3708 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@@ -101,7 +99,7 @@ acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
return (status);
}
-EXPORT_SYMBOL(acpi_get_type);
+ACPI_EXPORT_SYMBOL(acpi_get_type)
/*******************************************************************************
*
@@ -116,7 +114,6 @@ EXPORT_SYMBOL(acpi_get_type);
* Handle.
*
******************************************************************************/
-
acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
{
struct acpi_namespace_node *node;
@@ -162,7 +159,7 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
return (status);
}
-EXPORT_SYMBOL(acpi_get_parent);
+ACPI_EXPORT_SYMBOL(acpi_get_parent)
/*******************************************************************************
*
@@ -181,7 +178,6 @@ EXPORT_SYMBOL(acpi_get_parent);
* Scope is returned.
*
******************************************************************************/
-
acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
@@ -206,6 +202,7 @@ acpi_get_next_object(acpi_object_type type,
/* If null handle, use the parent */
if (!child) {
+
/* Start search at the beginning of the specified scope */
parent_node = acpi_ns_map_handle_to_node(parent);
@@ -242,4 +239,4 @@ acpi_get_next_object(acpi_object_type type,
return (status);
}
-EXPORT_SYMBOL(acpi_get_next_object);
+ACPI_EXPORT_SYMBOL(acpi_get_next_object)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 64b98e82feb..e2c1a16078c 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -36,12 +36,60 @@
#define _COMPONENT ACPI_NUMA
ACPI_MODULE_NAME("numa")
+static nodemask_t nodes_found_map = NODE_MASK_NONE;
+#define PXM_INVAL -1
+#define NID_INVAL -1
+
+/* maps to convert between proximity domain and logical node ID */
+int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
+ = { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
+int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+
extern int __init acpi_table_parse_madt_family(enum acpi_table_id id,
unsigned long madt_size,
int entry_id,
acpi_madt_entry_handler handler,
unsigned int max_entries);
+int __cpuinit pxm_to_node(int pxm)
+{
+ if (pxm < 0)
+ return NID_INVAL;
+ return pxm_to_node_map[pxm];
+}
+
+int __cpuinit node_to_pxm(int node)
+{
+ if (node < 0)
+ return PXM_INVAL;
+ return node_to_pxm_map[node];
+}
+
+int __cpuinit acpi_map_pxm_to_node(int pxm)
+{
+ int node = pxm_to_node_map[pxm];
+
+	if (node < 0) {
+ if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
+ return NID_INVAL;
+ node = first_unset_node(nodes_found_map);
+ pxm_to_node_map[pxm] = node;
+ node_to_pxm_map[node] = pxm;
+ node_set(node, nodes_found_map);
+ }
+
+ return node;
+}
+
+void __cpuinit acpi_unmap_pxm_to_node(int node)
+{
+ int pxm = node_to_pxm_map[node];
+ pxm_to_node_map[pxm] = NID_INVAL;
+ node_to_pxm_map[node] = PXM_INVAL;
+ node_clear(node, nodes_found_map);
+}
+
void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
{
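The new pxm_to_node()/acpi_map_pxm_to_node() helpers above centralize the proximity-domain to logical-node translation that the SRAT parsers previously open-coded per architecture: the first SRAT entry seen for a proximity domain claims the next free node id from nodes_found_map, and later entries for the same domain reuse it. A caller handling an SRAT affinity entry would use the map roughly as below (a sketch of the calling pattern only; handle_memory_affinity() and its arguments are placeholders, not names from this patch).

        /* Hypothetical SRAT handler -- only acpi_map_pxm_to_node() comes from this patch */
        static void handle_memory_affinity(int pxm, u64 start, u64 length)
        {
                int node = acpi_map_pxm_to_node(pxm);  /* allocates a node id on first use */

                if (node < 0) {                        /* NID_INVAL: out of node ids */
                        printk(KERN_WARNING "SRAT: too many proximity domains\n");
                        return;
                }
                /* ... record that [start, start + length) belongs to 'node' ... */
        }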
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 13b5fd5854a..1bb558adee6 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
+#include <linux/kthread.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
@@ -600,23 +601,41 @@ static void acpi_os_execute_deferred(void *context)
return_VOID;
}
-acpi_status
-acpi_os_queue_for_execution(u32 priority,
+static int acpi_os_execute_thread(void *context)
+{
+ struct acpi_os_dpc *dpc = (struct acpi_os_dpc *)context;
+ if (dpc) {
+ dpc->function(dpc->context);
+ kfree(dpc);
+ }
+ do_exit(0);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_os_execute
+ *
+ * PARAMETERS: Type - Type of the callback
+ * Function - Function to be executed
+ * Context - Function parameters
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Depending on type, either queues the function for deferred
+ *              execution or runs it immediately on a separate thread.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_execute(acpi_execute_type type,
acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct work_struct *task;
-
- ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Scheduling function [%p(%p)] for deferred execution.\n",
- function, context));
+ struct task_struct *p;
if (!function)
- return_ACPI_STATUS(AE_BAD_PARAMETER);
-
+ return AE_BAD_PARAMETER;
/*
* Allocate/initialize DPC structure. Note that this memory will be
* freed by the callee. The kernel handles the tq_struct list in a
@@ -627,30 +646,37 @@ acpi_os_queue_for_execution(u32 priority,
* We can save time and code by allocating the DPC and tq_structs
* from the same memory.
*/
-
- dpc =
- kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
- GFP_ATOMIC);
+ if (type == OSL_NOTIFY_HANDLER) {
+ dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_KERNEL);
+ } else {
+ dpc = kmalloc(sizeof(struct acpi_os_dpc) +
+ sizeof(struct work_struct), GFP_ATOMIC);
+ }
if (!dpc)
- return_ACPI_STATUS(AE_NO_MEMORY);
-
+ return AE_NO_MEMORY;
dpc->function = function;
dpc->context = context;
- task = (void *)(dpc + 1);
- INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
- if (!queue_work(kacpid_wq, task)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Call to queue_work() failed.\n"));
- kfree(dpc);
- status = AE_ERROR;
+ if (type == OSL_NOTIFY_HANDLER) {
+ p = kthread_create(acpi_os_execute_thread, dpc, "kacpid_notify");
+ if (!IS_ERR(p)) {
+ wake_up_process(p);
+ } else {
+ status = AE_NO_MEMORY;
+ kfree(dpc);
+ }
+ } else {
+ task = (void *)(dpc + 1);
+ INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
+ if (!queue_work(kacpid_wq, task)) {
+ status = AE_ERROR;
+ kfree(dpc);
+ }
}
-
- return_ACPI_STATUS(status);
+ return status;
}
-EXPORT_SYMBOL(acpi_os_queue_for_execution);
+EXPORT_SYMBOL(acpi_os_execute);
void acpi_os_wait_events_complete(void *context)
{
@@ -769,9 +795,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
- if (in_atomic())
- timeout = 0;
-
switch (timeout) {
/*
* No Wait:
@@ -896,14 +919,6 @@ u8 acpi_os_writable(void *ptr, acpi_size len)
}
#endif
-u32 acpi_os_get_thread_id(void)
-{
- if (!in_atomic())
- return current->pid;
-
- return 0;
-}
-
acpi_status acpi_os_signal(u32 function, void *info)
{
switch (function) {
@@ -1050,12 +1065,12 @@ void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags)
*
* FUNCTION: acpi_os_create_cache
*
- * PARAMETERS: CacheName - Ascii name for the cache
- * ObjectSize - Size of each cached object
- * MaxDepth - Maximum depth of the cache (in objects)
- * ReturnCache - Where the new cache object is returned
+ * PARAMETERS: name - Ascii name for the cache
+ * size - Size of each cached object
+ * depth - Maximum depth of the cache (in objects) <ignored>
+ * cache - Where the new cache object is returned
*
- * RETURN: Status
+ * RETURN: status
*
* DESCRIPTION: Create a cache object
*
@@ -1065,7 +1080,10 @@ acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
*cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
- return AE_OK;
+	if (*cache == NULL)
+ return AE_ERROR;
+ else
+ return AE_OK;
}
/*******************************************************************************
@@ -1134,16 +1152,63 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
*
* RETURN: Status
*
- * DESCRIPTION: Get an object from the specified cache. If cache is empty,
- * the object is allocated.
+ * DESCRIPTION: Return a zero-filled object.
*
******************************************************************************/
void *acpi_os_acquire_object(acpi_cache_t * cache)
{
- void *object = kmem_cache_alloc(cache, GFP_KERNEL);
+ void *object = kmem_cache_zalloc(cache, GFP_KERNEL);
WARN_ON(!object);
return object;
}
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_interface
+ *
+ * PARAMETERS: interface - Requested interface to be validated
+ *
+ * RETURN: AE_OK if interface is supported, AE_SUPPORT otherwise
+ *
+ * DESCRIPTION: Match an interface string to the interfaces supported by the
+ * host. Strings originate from an AML call to the _OSI method.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_interface (char *interface)
+{
+
+ return AE_SUPPORT;
+}
+
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_os_validate_address
+ *
+ * PARAMETERS: space_id - ACPI space ID
+ * address - Physical address
+ * length - Address length
+ *
+ * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
+ * should return AE_AML_ILLEGAL_ADDRESS.
+ *
+ * DESCRIPTION: Validate a system address via the host OS. Used to validate
+ * the addresses accessed by AML operation regions.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_address (
+ u8 space_id,
+ acpi_physical_address address,
+ acpi_size length)
+{
+
+ return AE_OK;
+}
+
+
#endif
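The acpi_os_queue_for_execution() -> acpi_os_execute() rework above gives ACPICA two execution paths: OSL_NOTIFY_HANDLER work is run on a freshly created kernel thread (kacpid_notify), so a notify handler may sleep or re-enter the interpreter without stalling the kacpid workqueue, while all other deferred work still goes through queue_work(). From the caller's side the interface looks roughly like the sketch below; my_notify_fn and my_context are placeholders, not names from this patch.

        /* Hypothetical caller: run a possibly-sleeping notify handler on its own thread */
        status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_fn, my_context);
        if (ACPI_FAILURE(status)) {
                /* on failure the context is still owned by the caller */
                kfree(my_context);
        }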
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index de573be5271..bf88e076c3e 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -79,7 +79,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
acpi_native_uint byte_count;
u8 byte_zero_mask = 0x3F; /* Default [0:5] */
- ACPI_FUNCTION_TRACE("ps_get_next_package_length");
+ ACPI_FUNCTION_TRACE(ps_get_next_package_length);
/*
* Byte 0 bits [6:7] contain the number of additional bytes
@@ -128,7 +128,7 @@ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state)
u8 *start = parser_state->aml;
u32 package_length;
- ACPI_FUNCTION_TRACE("ps_get_next_package_end");
+ ACPI_FUNCTION_TRACE(ps_get_next_package_end);
/* Function below updates parser_state->Aml */
@@ -157,7 +157,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
u8 *start = parser_state->aml;
u8 *end = parser_state->aml;
- ACPI_FUNCTION_TRACE("ps_get_next_namestring");
+ ACPI_FUNCTION_TRACE(ps_get_next_namestring);
/* Point past any namestring prefix characters (backslash or carat) */
@@ -237,7 +237,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *node;
union acpi_generic_state scope_info;
- ACPI_FUNCTION_TRACE("ps_get_next_namepath");
+ ACPI_FUNCTION_TRACE(ps_get_next_namepath);
path = acpi_ps_get_next_namestring(parser_state);
acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP);
@@ -275,6 +275,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
+
/* This name is actually a control method invocation */
method_desc = acpi_ns_get_attached_object(node);
@@ -319,6 +320,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
* some not_found cases are allowed
*/
if (status == AE_NOT_FOUND) {
+
/* 1) not_found is ok during load pass 1/2 (allow forward references) */
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) !=
@@ -354,6 +356,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
+
/* Report a control method execution error */
status = acpi_ds_method_error(status, walk_state);
@@ -388,7 +391,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
u16 opcode;
u8 *aml = parser_state->aml;
- ACPI_FUNCTION_TRACE_U32("ps_get_next_simple_arg", arg_type);
+ ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type);
switch (arg_type) {
case ARGP_BYTEDATA:
@@ -453,7 +456,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid arg_type %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type));
return_VOID;
}
@@ -484,7 +487,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
u16 opcode;
u32 name;
- ACPI_FUNCTION_TRACE("ps_get_next_field");
+ ACPI_FUNCTION_TRACE(ps_get_next_field);
/* Determine field type */
@@ -590,7 +593,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
u32 subop;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ps_get_next_arg", parser_state);
+ ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);
switch (arg_type) {
case ARGP_BYTEDATA:
@@ -620,6 +623,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_FIELDLIST:
if (parser_state->aml < parser_state->pkg_end) {
+
/* Non-empty list */
while (parser_state->aml < parser_state->pkg_end) {
@@ -645,6 +649,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_BYTELIST:
if (parser_state->aml < parser_state->pkg_end) {
+
/* Non-empty list */
arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
@@ -673,6 +678,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
if (subop == 0 ||
acpi_ps_is_leading_char(subop) ||
acpi_ps_is_prefix_char(subop)) {
+
/* null_name or name_string */
arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
@@ -703,6 +709,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
case ARGP_OBJLIST:
if (parser_state->aml < parser_state->pkg_end) {
+
/* Non-empty list of variable arguments, nothing returned */
walk_state->arg_count = ACPI_VAR_ARGS;
@@ -711,7 +718,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
default:
- ACPI_ERROR((AE_INFO, "Invalid arg_type: %X", arg_type));
+ ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type));
status = AE_AML_OPERAND_TYPE;
break;
}
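The many ACPI_FUNCTION_TRACE("name") -> ACPI_FUNCTION_TRACE(name) changes in this and the following parser files track the ACPICA debug macros, which now stringify the bare function name themselves instead of carrying a duplicate string literal, so there is one less thing to keep in sync when a function is renamed. Conceptually the underlying name macro does something like the simplified sketch below (not the literal header text).

        /* Simplified idea only: the real macro also feeds the trace/exit helpers */
        #define ACPI_FUNCTION_NAME(name) \
                static const char *_acpi_function_name = #name;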
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index 00b072e15d1..e1541db3753 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -83,7 +83,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
struct acpi_parse_state *parser_state;
u8 *aml_op_start = NULL;
- ACPI_FUNCTION_TRACE_PTR("ps_parse_loop", walk_state);
+ ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
if (walk_state->descending_callback == NULL) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -95,6 +95,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
+
/* We are restarting a preempted control method */
if (acpi_ps_has_completed_scope(parser_state)) {
@@ -128,7 +129,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
}
ACPI_EXCEPTION((AE_INFO, status,
- "get_predicate Failed"));
+ "GetPredicate Failed"));
return_ACPI_STATUS(status);
}
@@ -143,6 +144,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"Popped scope, Op=%p\n", op));
} else if (walk_state->prev_op) {
+
/* We were in the middle of an op */
op = walk_state->prev_op;
@@ -156,6 +158,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
while ((parser_state->aml < parser_state->aml_end) || (op)) {
aml_op_start = parser_state->aml;
if (!op) {
+
/* Get the next opcode from the AML stream */
walk_state->aml_offset =
@@ -213,6 +216,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Create Op structure and append to parent's argument list */
if (walk_state->op_info->flags & AML_NAMED) {
+
/* Allocate a new pre_op if necessary */
if (!pre_op) {
@@ -371,7 +375,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (walk_state->op_info) {
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Opcode %4.4X [%s] Op %p Aml %p aml_offset %5.5X\n",
+ "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
(u32) op->common.aml_opcode,
walk_state->op_info->name, op,
parser_state->aml,
@@ -388,6 +392,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Are there any arguments that must be processed? */
if (walk_state->arg_types) {
+
/* Get arguments */
switch (op->common.aml_opcode) {
@@ -742,7 +747,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
+
+ status2 =
+ acpi_ds_result_stack_pop
+ (walk_state);
+ if (ACPI_FAILURE(status2)) {
+ return_ACPI_STATUS(status2);
+ }
+
+ acpi_ut_delete_generic_state
+ (acpi_ut_pop_generic_state
+ (&walk_state->control_state));
}
+
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
@@ -762,6 +779,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(status2);
}
}
+
acpi_ps_pop_scope(parser_state, &op,
&walk_state->arg_types,
&walk_state->arg_count);
@@ -853,6 +871,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
}
else if (ACPI_FAILURE(status)) {
+
/* First error is most important */
(void)
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 11d6351ab8b..4bd25e32769 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -725,12 +725,13 @@ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
- ACPI_FUNCTION_NAME("ps_get_opcode_info");
+ ACPI_FUNCTION_NAME(ps_get_opcode_info);
/*
* Detect normal 8-bit opcode or extended 16-bit opcode
*/
if (!(opcode & 0xFF00)) {
+
/* Simple (8-bit) opcode: 0-255, can't index beyond table */
return (&acpi_gbl_aml_op_info
@@ -739,6 +740,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
if (((opcode & 0xFF00) == AML_EXTENDED_OPCODE) &&
(((u8) opcode) <= MAX_EXTENDED_OPCODE)) {
+
/* Valid extended (16-bit) opcode */
return (&acpi_gbl_aml_op_info
@@ -779,7 +781,7 @@ char *acpi_ps_get_opcode_name(u16 opcode)
return (op->name);
#else
- return ("AE_NOT_CONFIGURED");
+ return ("OpcodeName unavailable");
#endif
}
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index a9f3229f410..7ee2f2e7752 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -106,6 +106,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
opcode = (u16) ACPI_GET8(aml);
if (opcode == AML_EXTENDED_OP_PREFIX) {
+
/* Extended opcode, get the second opcode byte */
aml++;
@@ -137,7 +138,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
const struct acpi_opcode_info *parent_info;
union acpi_parse_object *replacement_op = NULL;
- ACPI_FUNCTION_TRACE_PTR("ps_complete_this_op", op);
+ ACPI_FUNCTION_TRACE_PTR(ps_complete_this_op, op);
/* Check for null Op, can happen if AML code is corrupt */
@@ -158,6 +159,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
if (op->common.parent) {
prev = op->common.parent->common.value.arg;
if (!prev) {
+
/* Nothing more to do */
goto cleanup;
@@ -245,6 +247,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
/* We must unlink this op from the parent tree */
if (prev == op) {
+
/* This op is the first in the list */
if (replacement_op) {
@@ -265,6 +268,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
else
while (prev) {
+
/* Traverse all siblings in the parent's argument list */
next = prev->common.next;
@@ -329,7 +333,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
struct acpi_parse_state *parser_state = &walk_state->parser_state;
acpi_status status = AE_CTRL_PENDING;
- ACPI_FUNCTION_TRACE_PTR("ps_next_parse_state", op);
+ ACPI_FUNCTION_TRACE_PTR(ps_next_parse_state, op);
switch (callback_status) {
case AE_CTRL_TERMINATE:
@@ -449,10 +453,10 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
struct acpi_thread_state *prev_walk_list = acpi_gbl_current_walk_list;
struct acpi_walk_state *previous_walk_state;
- ACPI_FUNCTION_TRACE("ps_parse_aml");
+ ACPI_FUNCTION_TRACE(ps_parse_aml);
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Entered with walk_state=%p Aml=%p size=%X\n",
+ "Entered with WalkState=%p Aml=%p size=%X\n",
walk_state, walk_state->parser_state.aml,
walk_state->parser_state.aml_size));
@@ -460,6 +464,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
thread = acpi_ut_create_thread_state();
if (!thread) {
+ acpi_ds_delete_walk_state(walk_state);
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -510,6 +515,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
} else if (status == AE_CTRL_TERMINATE) {
status = AE_OK;
} else if ((status != AE_OK) && (walk_state->method_desc)) {
+
/* Either the method parse or actual execution failed */
ACPI_ERROR_METHOD("Method parse/execution failed",
@@ -550,20 +556,9 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
*/
if (((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) || (ACPI_FAILURE(status))) {
- if (walk_state->method_desc) {
- /* Decrement the thread count on the method parse tree */
-
- if (walk_state->method_desc->method.
- thread_count) {
- walk_state->method_desc->method.
- thread_count--;
- } else {
- ACPI_ERROR((AE_INFO,
- "Invalid zero thread count in method"));
- }
- }
-
- acpi_ds_terminate_control_method(walk_state);
+ acpi_ds_terminate_control_method(walk_state->
+ method_desc,
+ walk_state);
}
/* Delete this walk state and all linked control states */
@@ -572,7 +567,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
previous_walk_state = walk_state;
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "return_value=%p, implicit_value=%p State=%p\n",
+ "ReturnValue=%p, ImplicitValue=%p State=%p\n",
walk_state->return_desc,
walk_state->implicit_return_obj, walk_state));
@@ -633,12 +628,14 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
}
} else {
if (previous_walk_state->return_desc) {
+
/* Caller doesn't want it, must delete it */
acpi_ut_remove_reference(previous_walk_state->
return_desc);
}
if (previous_walk_state->implicit_return_obj) {
+
/* Caller doesn't want it, must delete it */
acpi_ut_remove_reference(previous_walk_state->
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index bc6047caccd..a3e0314de24 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -106,14 +106,14 @@ acpi_ps_init_scope(struct acpi_parse_state * parser_state,
{
union acpi_generic_state *scope;
- ACPI_FUNCTION_TRACE_PTR("ps_init_scope", root_op);
+ ACPI_FUNCTION_TRACE_PTR(ps_init_scope, root_op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- scope->common.data_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
+ scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_RPSCOPE;
scope->parse_scope.op = root_op;
scope->parse_scope.arg_count = ACPI_VAR_ARGS;
scope->parse_scope.arg_end = parser_state->aml_end;
@@ -147,14 +147,14 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
{
union acpi_generic_state *scope;
- ACPI_FUNCTION_TRACE_PTR("ps_push_scope", op);
+ ACPI_FUNCTION_TRACE_PTR(ps_push_scope, op);
scope = acpi_ut_create_generic_state();
if (!scope) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- scope->common.data_type = ACPI_DESC_TYPE_STATE_PSCOPE;
+ scope->common.descriptor_type = ACPI_DESC_TYPE_STATE_PSCOPE;
scope->parse_scope.op = op;
scope->parse_scope.arg_list = remaining_args;
scope->parse_scope.arg_count = arg_count;
@@ -165,6 +165,7 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
acpi_ut_push_generic_state(&parser_state->scope, scope);
if (arg_count == ACPI_VAR_ARGS) {
+
/* Multiple arguments */
scope->parse_scope.arg_end = parser_state->pkg_end;
@@ -199,14 +200,14 @@ acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
{
union acpi_generic_state *scope = parser_state->scope;
- ACPI_FUNCTION_TRACE("ps_pop_scope");
+ ACPI_FUNCTION_TRACE(ps_pop_scope);
/* Only pop the scope if there is in fact a next scope */
if (scope->common.next) {
scope = acpi_ut_pop_generic_state(&parser_state->scope);
- /* return to parsing previous op */
+ /* Return to parsing previous op */
*op = scope->parse_scope.op;
*arg_list = scope->parse_scope.arg_list;
@@ -217,7 +218,7 @@ acpi_ps_pop_scope(struct acpi_parse_state *parser_state,
acpi_ut_delete_generic_state(scope);
} else {
- /* empty parse stack, prepare to fetch next opcode */
+ /* Empty parse stack, prepare to fetch next opcode */
*op = NULL;
*arg_list = 0;
@@ -246,7 +247,7 @@ void acpi_ps_cleanup_scope(struct acpi_parse_state *parser_state)
{
union acpi_generic_state *scope;
- ACPI_FUNCTION_TRACE_PTR("ps_cleanup_scope", parser_state);
+ ACPI_FUNCTION_TRACE_PTR(ps_cleanup_scope, parser_state);
if (!parser_state) {
return_VOID;
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index dd6f16726fc..0015717ef09 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -77,6 +77,7 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
+
/* Invalid opcode or ASCII character */
return (NULL);
@@ -85,6 +86,7 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
/* Check if this opcode requires argument sub-objects */
if (!(op_info->flags & AML_HAS_ARGS)) {
+
/* Has no linked argument objects */
return (NULL);
@@ -130,6 +132,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
if (op_info->class == AML_CLASS_UNKNOWN) {
+
/* Invalid opcode */
ACPI_ERROR((AE_INFO, "Invalid AML Opcode: 0x%2.2X",
@@ -140,6 +143,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Check if this opcode requires argument sub-objects */
if (!(op_info->flags & AML_HAS_ARGS)) {
+
/* Has no linked argument objects */
return;
@@ -148,6 +152,7 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
/* Append the argument to the linked argument list */
if (op->common.value.arg) {
+
/* Append to existing argument list */
prev_arg = op->common.value.arg;
@@ -222,12 +227,14 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
}
if (arg == origin) {
+
/* Reached parent of origin, end search */
return (NULL);
}
if (parent->common.next) {
+
/* Found sibling of parent */
return (parent->common.next);
@@ -299,5 +306,4 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op)
return (child);
}
#endif
-
#endif /* ACPI_FUTURE_USAGE */
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index 3e07cb9cb74..182474ae8ce 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -89,7 +89,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
{
ACPI_FUNCTION_ENTRY();
- op->common.data_type = ACPI_DESC_TYPE_PARSER;
+ op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
ACPI_DISASM_ONLY_MEMBERS(ACPI_STRNCPY(op->common.aml_op_name,
@@ -135,6 +135,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
/* Allocate the minimum required size object */
if (flags == ACPI_PARSEOP_GENERIC) {
+
/* The generic op (default) is by far the most common (16 to 1) */
op = acpi_os_acquire_object(acpi_gbl_ps_node_cache);
@@ -171,7 +172,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
void acpi_ps_free_op(union acpi_parse_object *op)
{
- ACPI_FUNCTION_NAME("ps_free_op");
+ ACPI_FUNCTION_NAME(ps_free_op);
if (op->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Free retval op: %p\n",
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index 06f05bfd761..a84a547a0f1 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -64,18 +64,21 @@ void acpi_ps_delete_parse_tree(union acpi_parse_object *subtree_root)
union acpi_parse_object *next = NULL;
union acpi_parse_object *parent = NULL;
- ACPI_FUNCTION_TRACE_PTR("ps_delete_parse_tree", subtree_root);
+ ACPI_FUNCTION_TRACE_PTR(ps_delete_parse_tree, subtree_root);
/* Visit all nodes in the subtree */
while (op) {
+
/* Check if we are not ascending */
if (op != parent) {
+
/* Look for an argument or child of the current op */
next = acpi_ps_get_arg(op, 0);
if (next) {
+
/* Still going downward in tree (Op is not completed yet) */
op = next;
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 2dd48cbb7c0..5d996c1140a 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -50,14 +50,14 @@
ACPI_MODULE_NAME("psxface")
/* Local Prototypes */
-static void acpi_ps_start_trace(struct acpi_parameter_info *info);
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
-static void acpi_ps_stop_trace(struct acpi_parameter_info *info);
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
-static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info);
+static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
static void
-acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action);
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
/*******************************************************************************
*
@@ -113,7 +113,7 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
*
******************************************************************************/
-static void acpi_ps_start_trace(struct acpi_parameter_info *info)
+static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
@@ -125,7 +125,7 @@ static void acpi_ps_start_trace(struct acpi_parameter_info *info)
}
if ((!acpi_gbl_trace_method_name) ||
- (acpi_gbl_trace_method_name != info->node->name.integer)) {
+ (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
@@ -158,7 +158,7 @@ static void acpi_ps_start_trace(struct acpi_parameter_info *info)
*
******************************************************************************/
-static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
+static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
{
acpi_status status;
@@ -170,7 +170,7 @@ static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
}
if ((!acpi_gbl_trace_method_name) ||
- (acpi_gbl_trace_method_name != info->node->name.integer)) {
+ (acpi_gbl_trace_method_name != info->resolved_node->name.integer)) {
goto exit;
}
@@ -212,22 +212,23 @@ static void acpi_ps_stop_trace(struct acpi_parameter_info *info)
*
******************************************************************************/
-acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ps_execute_method");
+ ACPI_FUNCTION_TRACE(ps_execute_method);
/* Validate the Info and method Node */
- if (!info || !info->node) {
+ if (!info || !info->resolved_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
}
/* Init for new method, wait on concurrency semaphore */
status =
- acpi_ds_begin_method_execution(info->node, info->obj_desc, NULL);
+ acpi_ds_begin_method_execution(info->resolved_node, info->obj_desc,
+ NULL);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -248,7 +249,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Parse **** Entry=%p obj=%p\n",
- info->node, info->obj_desc));
+ info->resolved_node, info->obj_desc));
info->pass_number = 1;
status = acpi_ps_execute_pass(info);
@@ -261,7 +262,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*/
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Begin Method Execution **** Entry=%p obj=%p\n",
- info->node, info->obj_desc));
+ info->resolved_node, info->obj_desc));
info->pass_number = 3;
status = acpi_ps_execute_pass(info);
@@ -286,8 +287,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
* a control exception code
*/
if (info->return_object) {
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "Method returned obj_desc=%p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Method returned ObjDesc=%p\n",
info->return_object));
ACPI_DUMP_STACK_ENTRY(info->return_object);
@@ -301,7 +301,7 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
*
* FUNCTION: acpi_ps_update_parameter_list
*
- * PARAMETERS: Info - See struct acpi_parameter_info
+ * PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: parameter_type and Parameters)
* Action - Add or Remove reference
*
@@ -312,14 +312,16 @@ acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info)
******************************************************************************/
static void
-acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
+acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
{
acpi_native_uint i;
if ((info->parameter_type == ACPI_PARAM_ARGS) && (info->parameters)) {
+
/* Update reference count for each parameter */
for (i = 0; info->parameters[i]; i++) {
+
/* Ignore errors, just do them all */
(void)acpi_ut_update_object_reference(info->
@@ -333,7 +335,7 @@ acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
*
* FUNCTION: acpi_ps_execute_pass
*
- * PARAMETERS: Info - See struct acpi_parameter_info
+ * PARAMETERS: Info - See struct acpi_evaluate_info
* (Used: pass_number, Node, and obj_desc)
*
* RETURN: Status
@@ -342,13 +344,13 @@ acpi_ps_update_parameter_list(struct acpi_parameter_info *info, u16 action)
*
******************************************************************************/
-static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info)
+static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
{
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
- ACPI_FUNCTION_TRACE("ps_execute_pass");
+ ACPI_FUNCTION_TRACE(ps_execute_pass);
/* Create and init a Root Node */
@@ -367,7 +369,7 @@ static acpi_status acpi_ps_execute_pass(struct acpi_parameter_info *info)
goto cleanup;
}
- status = acpi_ds_init_aml_walk(walk_state, op, info->node,
+ status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
info->obj_desc->method.aml_start,
info->obj_desc->method.aml_length,
info->pass_number == 1 ? NULL : info,
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 8920e8c6e24..228bdb62650 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -38,6 +38,7 @@
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pci.h>
+#include <linux/mutex.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -91,7 +92,7 @@ static struct {
int count;
struct list_head entries;
} acpi_link;
-DECLARE_MUTEX(acpi_link_lock);
+DEFINE_MUTEX(acpi_link_lock);
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -641,19 +642,19 @@ acpi_pci_link_allocate_irq(acpi_handle handle,
return_VALUE(-1);
}
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
return_VALUE(-1);
}
if (!link->irq.active) {
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link active IRQ is 0!\n"));
return_VALUE(-1);
}
link->refcnt++;
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
if (triggering)
*triggering = link->irq.triggering;
@@ -691,9 +692,9 @@ int acpi_pci_link_free_irq(acpi_handle handle)
return_VALUE(-1);
}
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
if (!link->irq.initialized) {
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link isn't initialized\n"));
return_VALUE(-1);
}
@@ -716,7 +717,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
if (link->refcnt == 0) {
acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
}
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
return_VALUE(link->irq.active);
}
@@ -747,7 +748,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS);
acpi_driver_data(device) = link;
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
result = acpi_pci_link_get_possible(link);
if (result)
goto end;
@@ -782,7 +783,7 @@ static int acpi_pci_link_add(struct acpi_device *device)
end:
/* disable all links -- to be activated on use */
acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL);
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
if (result)
kfree(link);
@@ -840,9 +841,9 @@ static int acpi_pci_link_remove(struct acpi_device *device, int type)
link = (struct acpi_pci_link *)acpi_driver_data(device);
- down(&acpi_link_lock);
+ mutex_lock(&acpi_link_lock);
list_del(&link->node);
- up(&acpi_link_lock);
+ mutex_unlock(&acpi_link_lock);
kfree(link);
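The acpi_link_lock changes above are part of the tree-wide move from binary semaphores to the mutex API: DECLARE_MUTEX() actually declared a semaphore with count 1, whereas DEFINE_MUTEX() gives a real mutex with debugging support and clearer sleeping semantics. The conversion is mechanical, as in this sketch (touch_link_list() is just a placeholder function):

        #include <linux/mutex.h>

        static DEFINE_MUTEX(acpi_link_lock);       /* was: DECLARE_MUTEX(acpi_link_lock) */

        static void touch_link_list(void)
        {
                mutex_lock(&acpi_link_lock);       /* was: down(&acpi_link_lock) */
                /* ... walk or modify acpi_link.entries ... */
                mutex_unlock(&acpi_link_lock);     /* was: up(&acpi_link_lock)   */
        }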
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 713b763884a..decaebb4cbe 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -110,7 +110,7 @@ static struct file_operations acpi_processor_info_fops = {
};
struct acpi_processor *processors[NR_CPUS];
-struct acpi_processor_errata errata;
+struct acpi_processor_errata errata __read_mostly;
/* --------------------------------------------------------------------------
Errata Handling
@@ -388,7 +388,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
/* Use the acpiid in MADT to map cpus in case of SMP */
#ifndef CONFIG_SMP
-#define convert_acpiid_to_cpu(acpi_id) (0xff)
+#define convert_acpiid_to_cpu(acpi_id) (-1)
#else
#ifdef CONFIG_IA64
@@ -401,7 +401,7 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
#define ARCH_BAD_APICID (0xff)
#endif
-static u8 convert_acpiid_to_cpu(u8 acpi_id)
+static int convert_acpiid_to_cpu(u8 acpi_id)
{
u16 apic_id;
int i;
@@ -427,7 +427,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
acpi_status status = 0;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- u8 cpu_index;
+ int cpu_index;
static int cpu0_initialized;
ACPI_FUNCTION_TRACE("acpi_processor_get_info");
@@ -473,7 +473,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
cpu_index = convert_acpiid_to_cpu(pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
- if (!cpu0_initialized && (cpu_index == 0xff) &&
+ if (!cpu0_initialized && (cpu_index == -1) &&
(num_online_cpus() == 1)) {
cpu_index = 0;
}
@@ -487,7 +487,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
* less than the max # of CPUs. They should be ignored _iff
* they are physically not present.
*/
- if (cpu_index >= NR_CPUS) {
+ if (cpu_index == -1) {
if (ACPI_FAILURE
(acpi_processor_hotadd_init(pr->handle, &pr->id))) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
@@ -558,8 +558,8 @@ static int acpi_processor_start(struct acpi_device *device)
*/
if (processor_device_array[pr->id] != NULL &&
processor_device_array[pr->id] != (void *)device) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "BIOS reporting wrong ACPI id"
- "for the processor\n"));
+		printk(KERN_WARNING "BIOS reported wrong ACPI id "
+ "for the processor\n");
return_VALUE(-ENODEV);
}
processor_device_array[pr->id] = (void *)device;
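Widening convert_acpiid_to_cpu() from u8 to int above removes the overloaded 0xff sentinel, which could collide with a real CPU number once NR_CPUS exceeds 255; -1 can never be a valid index. Callers now test the result as in this sketch, based directly on the hunks above:

        int cpu_index = convert_acpiid_to_cpu(pr->acpi_id);

        if (cpu_index == -1) {
                /* not listed in the MADT: hot-addable processor or firmware quirk */
        }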
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 80fa43471f4..3b97a5eae9e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -54,10 +54,10 @@ ACPI_MODULE_NAME("acpi_processor")
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
-static void (*pm_idle_save) (void);
+static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);
-static unsigned int nocst = 0;
+static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
/*
@@ -67,7 +67,7 @@ module_param(nocst, uint, 0000);
* 100 HZ: 0x0000000F: 4 jiffies = 40ms
* reduce history for more aggressive entry into C3
*/
-static unsigned int bm_history =
+static unsigned int bm_history __read_mostly =
(HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
/* --------------------------------------------------------------------------
@@ -1081,7 +1081,7 @@ int acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device)
{
acpi_status status = 0;
- static int first_run = 0;
+ static int first_run;
struct proc_dir_entry *entry = NULL;
unsigned int i;
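The __read_mostly annotations above (on pm_idle_save, nocst and bm_history here, and on errata in processor_core.c earlier) are pure data-placement hints: variables that are written once at init but read constantly in the idle path are grouped into a separate section so they do not share cache lines with frequently written data. Usage is just an attribute on the declaration, exactly as in the hunks:

        static unsigned int nocst __read_mostly;            /* set once from a module parameter */
        static void (*pm_idle_save)(void) __read_mostly;    /* saved original idle routine      */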
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index f36db22ce1a..41aaaba74b1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -34,6 +34,7 @@
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#endif
@@ -48,7 +49,7 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")
-static DECLARE_MUTEX(performance_sem);
+static DEFINE_MUTEX(performance_mutex);
/*
* _PPC support is implemented as a CPUfreq policy notifier:
@@ -72,7 +73,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;
- down(&performance_sem);
+ mutex_lock(&performance_mutex);
if (event != CPUFREQ_INCOMPATIBLE)
goto out;
@@ -93,7 +94,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
core_frequency * 1000);
out:
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return 0;
}
@@ -553,6 +554,230 @@ static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
}
#endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
+static int acpi_processor_get_psd(struct acpi_processor *pr)
+{
+ int result = 0;
+ acpi_status status = AE_OK;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
+ struct acpi_buffer state = {0, NULL};
+ union acpi_object *psd = NULL;
+ struct acpi_psd_package *pdomain;
+
+ status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return -ENODEV;
+ }
+
+ psd = (union acpi_object *) buffer.pointer;
+ if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (psd->package.count != 1) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ pdomain = &(pr->performance->domain_info);
+
+ state.length = sizeof(struct acpi_psd_package);
+ state.pointer = pdomain;
+
+ status = acpi_extract_package(&(psd->package.elements[0]),
+ &format, &state);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+ if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
+ result = -EFAULT;
+ goto end;
+ }
+
+end:
+ acpi_os_free(buffer.pointer);
+ return result;
+}
+
+int acpi_processor_preregister_performance(
+ struct acpi_processor_performance **performance)
+{
+ int count, count_target;
+ int retval = 0;
+ unsigned int i, j;
+ cpumask_t covered_cpus;
+ struct acpi_processor *pr;
+ struct acpi_psd_package *pdomain;
+ struct acpi_processor *match_pr;
+ struct acpi_psd_package *match_pdomain;
+
+ mutex_lock(&performance_mutex);
+
+ retval = 0;
+
+ /* Call _PSD for all CPUs */
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr) {
+ /* Look only at processors in ACPI namespace */
+ continue;
+ }
+
+ if (pr->performance) {
+ retval = -EBUSY;
+ continue;
+ }
+
+ if (!performance || !performance[i]) {
+ retval = -EINVAL;
+ continue;
+ }
+
+ pr->performance = performance[i];
+ cpu_set(i, pr->performance->shared_cpu_map);
+ if (acpi_processor_get_psd(pr)) {
+ retval = -EINVAL;
+ continue;
+ }
+ }
+ if (retval)
+ goto err_ret;
+
+ /*
+	 * Now that we have _PSD data from all CPUs, let's set up the P-state
+ * domain info.
+ */
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ /* Basic validity check for domain info */
+ pdomain = &(pr->performance->domain_info);
+ if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
+ (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+ }
+
+ cpus_clear(covered_cpus);
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ if (cpu_isset(i, covered_cpus))
+ continue;
+
+ pdomain = &(pr->performance->domain_info);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ cpu_set(i, covered_cpus);
+ if (pdomain->num_processors <= 1)
+ continue;
+
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ count = 1;
+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
+ pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ } else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ }
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pdomain = &(match_pr->performance->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /* Here i and j are in the same domain */
+
+ if (match_pdomain->num_processors != count_target) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ if (pdomain->coord_type != match_pdomain->coord_type) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ cpu_set(j, covered_cpus);
+ cpu_set(j, pr->performance->shared_cpu_map);
+ count++;
+ }
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pdomain = &(match_pr->performance->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ match_pr->performance->shared_type =
+ pr->performance->shared_type;
+ match_pr->performance->shared_cpu_map =
+ pr->performance->shared_cpu_map;
+ }
+ }
+
+err_ret:
+ if (retval) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
+ }
+
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr || !pr->performance)
+ continue;
+
+ /* Assume no coordination on any error parsing domain info */
+ if (retval) {
+ cpus_clear(pr->performance->shared_cpu_map);
+ cpu_set(i, pr->performance->shared_cpu_map);
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ }
+ pr->performance = NULL; /* Will be set for real in register */
+ }
+
+ mutex_unlock(&performance_mutex);
+ return retval;
+}
+EXPORT_SYMBOL(acpi_processor_preregister_performance);
+
+
int
acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu)
@@ -564,16 +789,16 @@ acpi_processor_register_performance(struct acpi_processor_performance
if (!(acpi_processor_ppc_status & PPC_REGISTERED))
return_VALUE(-EINVAL);
- down(&performance_sem);
+ mutex_lock(&performance_mutex);
pr = processors[cpu];
if (!pr) {
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(-ENODEV);
}
if (pr->performance) {
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(-EBUSY);
}
@@ -583,13 +808,13 @@ acpi_processor_register_performance(struct acpi_processor_performance
if (acpi_processor_get_performance_info(pr)) {
pr->performance = NULL;
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(-EIO);
}
acpi_cpufreq_add_file(pr);
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VALUE(0);
}
@@ -603,11 +828,11 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
ACPI_FUNCTION_TRACE("acpi_processor_unregister_performance");
- down(&performance_sem);
+ mutex_lock(&performance_mutex);
pr = processors[cpu];
if (!pr) {
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VOID;
}
@@ -617,7 +842,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
acpi_cpufreq_remove_file(pr);
- up(&performance_sem);
+ mutex_unlock(&performance_mutex);
return_VOID;
}
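acpi_processor_preregister_performance(), added above, evaluates _PSD for every possible CPU and fills in each processor's shared_cpu_map and shared_type before any cpufreq driver registers, so P-state changes can later be coordinated across a dependency domain; if _PSD is absent or inconsistent it assumes no cross-CPU coordination, as the error message in the hunk says. A cpufreq driver would call it once at init time, roughly as follows (a sketch only; the per-CPU allocation scheme and my_cpufreq_init() are the driver's own, not part of this patch):

        /* Hypothetical driver init: one acpi_processor_performance per possible CPU */
        static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];

        static int __init my_cpufreq_init(void)
        {
                unsigned int i;

                for_each_possible_cpu(i) {
                        acpi_perf_data[i] = kzalloc(sizeof(*acpi_perf_data[i]), GFP_KERNEL);
                        if (!acpi_perf_data[i])
                                return -ENOMEM;    /* error unwinding omitted in this sketch */
                }
                return acpi_processor_preregister_performance(acpi_perf_data);
        }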
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 4038dbfa63a..cf87b023002 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -78,6 +78,7 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
ACPI_FUNCTION_ENTRY();
for (bits_set = 0; bit_field; bits_set++) {
+
/* Zero the least significant bit that is set */
bit_field &= (bit_field - 1);
@@ -154,15 +155,18 @@ acpi_rs_stream_option_length(u32 resource_length,
* length, minus one byte for the resource_source_index itself.
*/
if (resource_length > minimum_aml_resource_length) {
+
/* Compute the length of the optional string */
string_length =
resource_length - minimum_aml_resource_length - 1;
}
- /* Round up length to 32 bits for internal structure alignment */
-
- return (ACPI_ROUND_UP_to_32_bITS(string_length));
+ /*
+ * Round the length up to a multiple of the native word in order to
+ * guarantee that the entire resource descriptor is native word aligned
+ */
+ return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
}
/*******************************************************************************
@@ -186,11 +190,12 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
acpi_size aml_size_needed = 0;
acpi_rs_length total_size;
- ACPI_FUNCTION_TRACE("rs_get_aml_length");
+ ACPI_FUNCTION_TRACE(rs_get_aml_length);
/* Traverse entire list of internal resource descriptors */
while (resource) {
+
/* Validate the descriptor type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
@@ -214,6 +219,7 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
* is a Large Resource data type.
*/
if (resource->data.vendor.byte_length > 7) {
+
/* Base size of a Large resource descriptor */
total_size =
@@ -332,20 +338,22 @@ acpi_rs_get_list_length(u8 * aml_buffer,
acpi_status status;
u8 *end_aml;
u8 *buffer;
- u32 buffer_size = 0;
+ u32 buffer_size;
u16 temp16;
u16 resource_length;
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
- ACPI_FUNCTION_TRACE("rs_get_list_length");
+ ACPI_FUNCTION_TRACE(rs_get_list_length);
+ *size_needed = 0;
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
while (aml_buffer < end_aml) {
+
/* Validate the Resource Type and Resource Length */
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
@@ -386,35 +394,28 @@ acpi_rs_get_list_length(u8 * aml_buffer,
break;
case ACPI_RESOURCE_NAME_VENDOR_SMALL:
+ case ACPI_RESOURCE_NAME_VENDOR_LARGE:
/*
* Vendor Resource:
- * Ensure a 32-bit boundary for the structure
+ * Get the number of vendor data bytes
*/
- extra_struct_bytes =
- ACPI_ROUND_UP_to_32_bITS(resource_length);
+ extra_struct_bytes = resource_length;
break;
case ACPI_RESOURCE_NAME_END_TAG:
/*
- * End Tag: This is the normal exit, add size of end_tag
+ * End Tag:
+ * This is the normal exit, add size of end_tag
*/
- *size_needed = buffer_size + ACPI_RS_SIZE_MIN;
+ *size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
- case ACPI_RESOURCE_NAME_VENDOR_LARGE:
- /*
- * Vendor Resource:
- * Add vendor data and ensure a 32-bit boundary for the structure
- */
- extra_struct_bytes =
- ACPI_ROUND_UP_to_32_bITS(resource_length);
- break;
-
case ACPI_RESOURCE_NAME_ADDRESS32:
case ACPI_RESOURCE_NAME_ADDRESS16:
+ case ACPI_RESOURCE_NAME_ADDRESS64:
/*
- * 32-Bit or 16-bit Address Resource:
- * Add the size of any optional data (resource_source)
+ * Address Resource:
+ * Add the size of the optional resource_source
*/
extra_struct_bytes =
acpi_rs_stream_option_length(resource_length,
@@ -423,50 +424,46 @@ acpi_rs_get_list_length(u8 * aml_buffer,
case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
/*
- * Extended IRQ:
- * Point past the interrupt_vector_flags to get the
- * interrupt_table_length.
+ * Extended IRQ Resource:
+ * Using the interrupt_table_length, add 4 bytes for each additional
+ * interrupt. Note: at least one interrupt is required and is
+ * included in the minimum descriptor size (reason for the -1)
*/
- buffer++;
+ extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
- extra_struct_bytes =
- /*
- * Add 4 bytes for each additional interrupt. Note: at
- * least one interrupt is required and is included in
- * the minimum descriptor size
- */
- ((*buffer - 1) * sizeof(u32)) +
- /* Add the size of any optional data (resource_source) */
+ /* Add the size of the optional resource_source */
+
+ extra_struct_bytes +=
acpi_rs_stream_option_length(resource_length -
extra_struct_bytes,
minimum_aml_resource_length);
break;
- case ACPI_RESOURCE_NAME_ADDRESS64:
- /*
- * 64-Bit Address Resource:
- * Add the size of any optional data (resource_source)
- * Ensure a 64-bit boundary for the structure
- */
- extra_struct_bytes =
- ACPI_ROUND_UP_to_64_bITS
- (acpi_rs_stream_option_length
- (resource_length, minimum_aml_resource_length));
- break;
-
default:
break;
}
- /* Update the required buffer size for the internal descriptor structs */
+ /*
+ * Update the required buffer size for the internal descriptor structs
+ *
+ * Important: Round the size up for the appropriate alignment. This
+ * is a requirement on IA64.
+ */
+ buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
- temp16 = (u16) (acpi_gbl_resource_struct_sizes[resource_index] +
- extra_struct_bytes);
- buffer_size += (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(temp16);
+ *size_needed += buffer_size;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+ "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+ acpi_ut_get_resource_type(aml_buffer),
+ acpi_ut_get_descriptor_length(aml_buffer),
+ buffer_size));
/*
- * Point to the next resource within the stream
- * using the size of the header plus the length contained in the header
+ * Point to the next resource within the AML stream using the length
+ * contained in the resource descriptor header
*/
aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
}
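
The size accumulation above replaces the old fixed 32-bit rounding: each internal descriptor struct is now padded out to the machine's native word so that every struct acpi_resource in the output buffer starts on an aligned boundary (the comment singles out IA64, where misaligned accesses can fault). A minimal sketch of the round-up idiom, assuming the usual power-of-two trick rather than the exact ACPI_ROUND_UP_TO_NATIVE_WORD definition:

	/* Illustrative only: round size up to a multiple of the native word.
	 * sizeof(void *) stands in for the native word width (4 or 8); the
	 * real ACPICA macro may be defined differently. */
	static inline u32 round_up_native(u32 size)
	{
		u32 word = (u32) sizeof(void *);
		return (size + word - 1) & ~(word - 1);	/* word is a power of two */
	}

With an 8-byte word a 13-byte descriptor becomes 16 bytes, so the next struct in the caller's buffer is again word-aligned.
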
@@ -506,7 +503,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
u8 name_found;
u32 table_index;
- ACPI_FUNCTION_TRACE("rs_get_pci_routing_table_length");
+ ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
number_of_elements = package_object->package.count;
@@ -523,6 +520,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
top_object_list = package_object->package.elements;
for (index = 0; index < number_of_elements; index++) {
+
/* Dereference the sub-package */
package_element = *top_object_list;
@@ -581,7 +579,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
/* Round up the size since each element must be aligned */
- temp_size_needed = ACPI_ROUND_UP_to_64_bITS(temp_size_needed);
+ temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
/* Point to the next union acpi_operand_object */
@@ -589,7 +587,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
}
/*
- * Adding an extra element to the end of the list, essentially a
+ * Add an extra element to the end of the list, essentially a
* NULL terminator
*/
*buffer_size_needed =
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 8c128dea325..008058acdd3 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -75,10 +75,11 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
u8 *aml_start;
acpi_size list_size_needed = 0;
u32 aml_buffer_length;
+ void *resource;
- ACPI_FUNCTION_TRACE("rs_create_resource_list");
+ ACPI_FUNCTION_TRACE(rs_create_resource_list);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "aml_buffer = %p\n", aml_buffer));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlBuffer = %p\n", aml_buffer));
/* Params already validated, so we don't re-validate here */
@@ -92,7 +93,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
status = acpi_rs_get_list_length(aml_start, aml_buffer_length,
&list_size_needed);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X list_size_needed=%X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Status=%X ListSizeNeeded=%X\n",
status, (u32) list_size_needed));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -107,13 +108,15 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
/* Do the conversion */
- status = acpi_rs_convert_aml_to_resources(aml_start, aml_buffer_length,
- output_buffer->pointer);
+ resource = output_buffer->pointer;
+ status = acpi_ut_walk_aml_resources(aml_start, aml_buffer_length,
+ acpi_rs_convert_aml_to_resources,
+ &resource);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "output_buffer %p Length %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
output_buffer->pointer, (u32) output_buffer->length));
return_ACPI_STATUS(AE_OK);
}
@@ -155,7 +158,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
acpi_status status;
struct acpi_buffer path_buffer;
- ACPI_FUNCTION_TRACE("rs_create_pci_routing_table");
+ ACPI_FUNCTION_TRACE(rs_create_pci_routing_table);
/* Params already validated, so we don't re-validate here */
@@ -167,7 +170,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
return_ACPI_STATUS(status);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "buffer_size_needed = %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "BufferSizeNeeded = %X\n",
(u32) buffer_size_needed));
/* Validate/Allocate/Clear caller buffer */
@@ -332,7 +335,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/* Now align the current length */
user_prt->length =
- (u32) ACPI_ROUND_UP_to_64_bITS(user_prt->length);
+ (u32) ACPI_ROUND_UP_TO_64BIT(user_prt->length);
/* 4) Fourth subobject: Dereference the PRT.source_index */
@@ -341,7 +344,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
user_prt->source_index = (u32) obj_desc->integer.value;
} else {
ACPI_ERROR((AE_INFO,
- "(PRT[%X].source_index) Need Integer, found %s",
+ "(PRT[%X].SourceIndex) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(obj_desc)));
return_ACPI_STATUS(AE_BAD_DATA);
@@ -352,7 +355,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
top_object_list++;
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "output_buffer %p Length %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
output_buffer->pointer, (u32) output_buffer->length));
return_ACPI_STATUS(AE_OK);
}
@@ -382,9 +385,9 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
acpi_status status;
acpi_size aml_size_needed = 0;
- ACPI_FUNCTION_TRACE("rs_create_aml_resources");
+ ACPI_FUNCTION_TRACE(rs_create_aml_resources);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "linked_list_buffer = %p\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "LinkedListBuffer = %p\n",
linked_list_buffer));
/*
@@ -395,7 +398,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
*/
status = acpi_rs_get_aml_length(linked_list_buffer, &aml_size_needed);
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "aml_size_needed=%X, %s\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AmlSizeNeeded=%X, %s\n",
(u32) aml_size_needed,
acpi_format_exception(status)));
if (ACPI_FAILURE(status)) {
@@ -419,7 +422,7 @@ acpi_rs_create_aml_resources(struct acpi_resource *linked_list_buffer,
return_ACPI_STATUS(status);
}
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "output_buffer %p Length %X\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "OutputBuffer %p Length %X\n",
output_buffer->pointer, (u32) output_buffer->length));
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index e7de061cf88..9c99a723a86 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -91,11 +91,11 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
struct acpi_rsdump_info acpi_rs_dump_irq[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
- acpi_gbl_HEdecode},
+ acpi_gbl_he_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
- acpi_gbl_LLdecode},
+ acpi_gbl_ll_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing",
- acpi_gbl_SHRdecode},
+ acpi_gbl_shr_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count),
"Interrupt Count", NULL},
{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(irq.interrupts[0]),
@@ -105,11 +105,11 @@ struct acpi_rsdump_info acpi_rs_dump_irq[6] = {
struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_dma), "DMA", NULL},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.type), "Speed",
- acpi_gbl_TYPdecode},
+ acpi_gbl_typ_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(dma.bus_master), "Mastering",
- acpi_gbl_BMdecode},
+ acpi_gbl_bm_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(dma.transfer), "Transfer Type",
- acpi_gbl_SIZdecode},
+ acpi_gbl_siz_decode},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(dma.channel_count), "Channel Count",
NULL},
{ACPI_RSD_SHORTLIST, ACPI_RSD_OFFSET(dma.channels[0]), "Channel List",
@@ -158,7 +158,7 @@ struct acpi_rsdump_info acpi_rs_dump_vendor[3] = {
};
struct acpi_rsdump_info acpi_rs_dump_end_tag[1] = {
- {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "end_tag",
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_end_tag), "EndTag",
NULL}
};
@@ -166,7 +166,7 @@ struct acpi_rsdump_info acpi_rs_dump_memory24[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory24),
"24-Bit Memory Range", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory24.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.minimum), "Address Minimum",
NULL},
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET(memory24.maximum), "Address Maximum",
@@ -181,7 +181,7 @@ struct acpi_rsdump_info acpi_rs_dump_memory32[6] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory32),
"32-Bit Memory Range", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(memory32.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.minimum), "Address Minimum",
NULL},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(memory32.maximum), "Address Maximum",
@@ -196,7 +196,7 @@ struct acpi_rsdump_info acpi_rs_dump_fixed_memory32[4] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_memory32),
"32-Bit Fixed Memory Range", NULL},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(fixed_memory32.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address), "Address",
NULL},
{ACPI_RSD_UINT32, ACPI_RSD_OFFSET(fixed_memory32.address_length),
@@ -278,11 +278,11 @@ struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = {
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.producer_consumer),
"Type", acpi_gbl_consume_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.triggering),
- "Triggering", acpi_gbl_HEdecode},
+ "Triggering", acpi_gbl_he_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity",
- acpi_gbl_LLdecode},
+ acpi_gbl_ll_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing",
- acpi_gbl_SHRdecode},
+ acpi_gbl_shr_decode},
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL,
NULL},
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET(extended_irq.interrupt_count),
@@ -314,7 +314,7 @@ static struct acpi_rsdump_info acpi_rs_dump_general_flags[5] = {
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.producer_consumer),
"Consumer/Producer", acpi_gbl_consume_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.decode), "Address Decode",
- acpi_gbl_DECdecode},
+ acpi_gbl_dec_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.min_address_fixed),
"Min Relocatability", acpi_gbl_min_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.max_address_fixed),
@@ -325,24 +325,24 @@ static struct acpi_rsdump_info acpi_rs_dump_memory_flags[5] = {
{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_memory_flags),
"Resource Type", (void *)"Memory Range"},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.write_protect),
- "Write Protect", acpi_gbl_RWdecode},
+ "Write Protect", acpi_gbl_rw_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.caching),
- "Caching", acpi_gbl_MEMdecode},
+ "Caching", acpi_gbl_mem_decode},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.mem.range_type),
- "Range Type", acpi_gbl_MTPdecode},
+ "Range Type", acpi_gbl_mtp_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.mem.translation),
- "Translation", acpi_gbl_TTPdecode}
+ "Translation", acpi_gbl_ttp_decode}
};
static struct acpi_rsdump_info acpi_rs_dump_io_flags[4] = {
{ACPI_RSD_LITERAL, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_io_flags),
"Resource Type", (void *)"I/O Range"},
{ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(address.info.io.range_type),
- "Range Type", acpi_gbl_RNGdecode},
+ "Range Type", acpi_gbl_rng_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation),
- "Translation", acpi_gbl_TTPdecode},
+ "Translation", acpi_gbl_ttp_decode},
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(address.info.io.translation_type),
- "Translation Type", acpi_gbl_TRSdecode}
+ "Translation Type", acpi_gbl_trs_decode}
};
/*
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index d9ae64b77bd..9e7ae2f8a1d 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -141,6 +141,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
};
#endif
+
#endif /* ACPI_FUTURE_USAGE */
/*
* Base sizes for external AML resource descriptors, indexed by internal type.
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index 1434e786477..29423ce030c 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -51,76 +51,62 @@ ACPI_MODULE_NAME("rslist")
*
* FUNCTION: acpi_rs_convert_aml_to_resources
*
- * PARAMETERS: Aml - Pointer to the resource byte stream
- * aml_length - Length of Aml
- * output_buffer - Pointer to the buffer that will
- * contain the output structures
+ * PARAMETERS: acpi_walk_aml_callback
+ * resource_ptr - Pointer to the buffer that will
+ * contain the output structures
*
* RETURN: Status
*
- * DESCRIPTION: Takes the resource byte stream and parses it, creating a
- * linked list of resources in the caller's output buffer
+ * DESCRIPTION: Convert an AML resource to an internal representation of the
+ * resource that is aligned and easier to access.
*
******************************************************************************/
acpi_status
-acpi_rs_convert_aml_to_resources(u8 * aml, u32 aml_length, u8 * output_buffer)
+acpi_rs_convert_aml_to_resources(u8 * aml,
+ u32 length,
+ u32 offset, u8 resource_index, void **context)
{
- struct acpi_resource *resource = (void *)output_buffer;
+ struct acpi_resource **resource_ptr =
+ ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
+ struct acpi_resource *resource;
acpi_status status;
- u8 resource_index;
- u8 *end_aml;
-
- ACPI_FUNCTION_TRACE("rs_convert_aml_to_resources");
-
- end_aml = aml + aml_length;
-
- /* Loop until end-of-buffer or an end_tag is found */
-
- while (aml < end_aml) {
- /* Validate the Resource Type and Resource Length */
-
- status = acpi_ut_validate_resource(aml, &resource_index);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- /* Convert the AML byte stream resource to a local resource struct */
-
- status =
- acpi_rs_convert_aml_to_resource(resource,
- ACPI_CAST_PTR(union
- aml_resource,
- aml),
- acpi_gbl_get_resource_dispatch
- [resource_index]);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Could not convert AML resource (Type %X)",
- *aml));
- return_ACPI_STATUS(status);
- }
+ ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
- /* Normal exit on completion of an end_tag resource descriptor */
-
- if (acpi_ut_get_resource_type(aml) ==
- ACPI_RESOURCE_NAME_END_TAG) {
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Point to the next input AML resource */
-
- aml += acpi_ut_get_descriptor_length(aml);
-
- /* Point to the next structure in the output buffer */
+ /*
+ * Check that the input buffer and all subsequent pointers into it
+ * are aligned on a native word boundary. Most important on IA64
+ */
+ resource = *resource_ptr;
+ if (ACPI_IS_MISALIGNED(resource)) {
+ ACPI_WARNING((AE_INFO,
+ "Misaligned resource pointer %p", resource));
+ }
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ /* Convert the AML byte stream resource to a local resource struct */
+
+ status =
+ acpi_rs_convert_aml_to_resource(resource,
+ ACPI_CAST_PTR(union aml_resource,
+ aml),
+ acpi_gbl_get_resource_dispatch
+ [resource_index]);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not convert AML resource (Type %X)",
+ *aml));
+ return_ACPI_STATUS(status);
}
- /* Did not find an end_tag resource descriptor */
+ ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
+ "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+ acpi_ut_get_resource_type(aml), length,
+ resource->length));
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ /* Point to the next structure in the output buffer */
+
+ *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+ return_ACPI_STATUS(AE_OK);
}
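
The per-descriptor loop that used to live in this function (validate the descriptor, convert it, advance by the header length, stop at the end tag) has moved into a shared AML walker; what remains here is only the per-descriptor callback that acpi_rs_create_resource_list() now hands to acpi_ut_walk_aml_resources() together with a pointer-to-pointer cursor into the output buffer. A rough sketch of the walk loop this callback plugs into -- illustrative only, the real walker lives in the utilities layer and its details are not part of this hunk:

	/* Hypothetical outline of an acpi_walk_aml_callback-driven walk */
	static acpi_status walk_aml_resources(u8 *aml, acpi_size length,
					      acpi_walk_aml_callback callback,
					      void **context)
	{
		u8 *start = aml;
		u8 *end = aml + length;
		u8 resource_index;
		acpi_status status;

		while (aml < end) {
			/* Validate the Resource Type and Resource Length */
			status = acpi_ut_validate_resource(aml, &resource_index);
			if (ACPI_FAILURE(status))
				return status;

			/* Invoke the callback for this descriptor */
			status = callback(aml,
					  acpi_ut_get_descriptor_length(aml),
					  (u32) (aml - start),
					  resource_index, context);
			if (ACPI_FAILURE(status))
				return status;

			/* An end_tag is the normal exit */
			if (acpi_ut_get_resource_type(aml) ==
			    ACPI_RESOURCE_NAME_END_TAG)
				return AE_OK;

			aml += acpi_ut_get_descriptor_length(aml);
		}

		return AE_AML_NO_RESOURCE_END_TAG;	/* no end tag found */
	}
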
/*******************************************************************************
@@ -150,11 +136,12 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
u8 *end_aml = output_buffer + aml_size_needed;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_convert_resources_to_aml");
+ ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
/* Walk the resource descriptor list, convert each descriptor */
while (aml < end_aml) {
+
/* Validate the (internal) Resource Type */
if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
@@ -191,6 +178,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Check for end-of-list, normal exit */
if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+
/* An End Tag indicates the end of the input Resource Template */
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index ed866cf1c6d..faf6e106b78 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -81,9 +81,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
u16 item_count = 0;
u16 temp16 = 0;
- ACPI_FUNCTION_TRACE("rs_get_resource");
+ ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
if (((acpi_native_uint) resource) & 0x3) {
+
/* Each internal resource struct is expected to be 32-bit aligned */
ACPI_WARNING((AE_INFO,
@@ -295,9 +296,11 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
exit:
if (!flags_mode) {
- /* Round the resource struct length up to the next 32-bit boundary */
- resource->length = ACPI_ROUND_UP_to_32_bITS(resource->length);
+ /* Round the resource struct length up to the next boundary (32 or 64) */
+
+ resource->length =
+ (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(resource->length);
}
return_ACPI_STATUS(AE_OK);
}
@@ -329,7 +332,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
u16 temp16 = 0;
u16 item_count = 0;
- ACPI_FUNCTION_TRACE("rs_convert_resource_to_aml");
+ ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
@@ -535,6 +538,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
resource->data.extended_irq.interrupt_count = temp8;
if (temp8 < 1) {
+
/* Must have at least one IRQ */
return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 25b5aedd661..a9cbee8e8b4 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -205,6 +205,7 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
/* Length is stored differently for large and small descriptors */
if (aml->small_header.descriptor_type & ACPI_RESOURCE_NAME_LARGE) {
+
/* Large descriptor -- bytes 1-2 contain the 16-bit length */
ACPI_MOVE_16_TO_16(&aml->large_header.resource_length,
@@ -298,7 +299,8 @@ static u16 acpi_rs_strcpy(char *destination, char *source)
* string_ptr - (optional) where to store the actual
* resource_source string
*
- * RETURN: Length of the string plus NULL terminator, rounded up to 32 bit
+ * RETURN: Length of the string plus NULL terminator, rounded up to native
+ * word boundary
*
* DESCRIPTION: Copy the optional resource_source data from a raw AML descriptor
* to an internal resource descriptor
@@ -328,6 +330,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
* we add 1 to the minimum length.
*/
if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+
/* Get the resource_source_index */
resource_source->index = aml_resource_source[0];
@@ -344,23 +347,26 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
}
/*
- * In order for the struct_size to fall on a 32-bit boundary, calculate
- * the length of the string (+1 for the NULL terminator) and expand the
- * struct_size to the next 32-bit boundary.
+ * In order for the Resource length to be a multiple of the native
+ * word, calculate the length of the string (+1 for NULL terminator)
+ * and expand to the next word multiple.
*
* Zero the entire area of the buffer.
*/
total_length =
- ACPI_ROUND_UP_to_32_bITS(ACPI_STRLEN
- ((char *)&aml_resource_source[1]) +
- 1);
+ (u32)
+ ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
+ 1;
+ total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
+
ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
/* Copy the resource_source string to the destination */
resource_source->string_length =
acpi_rs_strcpy(resource_source->string_ptr,
- (char *)&aml_resource_source[1]);
+ ACPI_CAST_PTR(char,
+ &aml_resource_source[1]));
return ((acpi_rs_length) total_length);
}
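
A concrete instance of the new sizing rule, using a hypothetical resource_source string rather than anything from this patch: "\_SB.PCI0" is nine characters, so strlen + 1 gives 10; ACPI_ROUND_UP_TO_NATIVE_WORD then yields 12 bytes on a 32-bit build and 16 on a 64-bit build, and because the whole rounded region is zeroed before acpi_rs_strcpy runs, the padding bytes after the terminator are deterministic.
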
@@ -405,6 +411,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
/* Non-zero string length indicates presence of a resource_source */
if (resource_source->string_length) {
+
/* Point to the end of the AML descriptor */
aml_resource_source = ACPI_ADD_PTR(u8, aml, minimum_length);
@@ -415,7 +422,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
/* Copy the resource_source string */
- ACPI_STRCPY((char *)&aml_resource_source[1],
+ ACPI_STRCPY(ACPI_CAST_PTR(char, &aml_resource_source[1]),
resource_source->string_ptr);
/*
@@ -435,9 +442,9 @@ acpi_rs_set_resource_source(union aml_resource * aml,
*
* FUNCTION: acpi_rs_get_prt_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -450,18 +457,19 @@ acpi_rs_set_resource_source(union aml_resource * aml,
******************************************************************************/
acpi_status
-acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer * ret_buffer)
+acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
+ struct acpi_buffer * ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_prt_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_prt_method_data);
/* Parameters guaranteed valid by caller */
/* Execute the method, no parameters */
- status = acpi_ut_evaluate_object(handle, METHOD_NAME__PRT,
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__PRT,
ACPI_BTYPE_PACKAGE, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -483,9 +491,9 @@ acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer * ret_buffer)
*
* FUNCTION: acpi_rs_get_crs_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -498,18 +506,19 @@ acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer * ret_buffer)
******************************************************************************/
acpi_status
-acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_crs_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_crs_method_data);
/* Parameters guaranteed valid by caller */
/* Execute the method, no parameters */
- status = acpi_ut_evaluate_object(handle, METHOD_NAME__CRS,
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__CRS,
ACPI_BTYPE_BUFFER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -522,7 +531,7 @@ acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* on exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluate_object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -532,9 +541,9 @@ acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*
* FUNCTION: acpi_rs_get_prs_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -548,18 +557,19 @@ acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_prs_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_prs_method_data);
/* Parameters guaranteed valid by caller */
/* Execute the method, no parameters */
- status = acpi_ut_evaluate_object(handle, METHOD_NAME__PRS,
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__PRS,
ACPI_BTYPE_BUFFER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -572,7 +582,7 @@ acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* on exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluate_object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -583,10 +593,10 @@ acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer)
*
* FUNCTION: acpi_rs_get_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
+ * PARAMETERS: Handle - Handle to the containing object
* Path - Path to method, relative to Handle
- * ret_buffer - a pointer to a buffer structure for the
- * results
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
*
* RETURN: Status
*
@@ -605,7 +615,7 @@ acpi_rs_get_method_data(acpi_handle handle,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("rs_get_method_data");
+ ACPI_FUNCTION_TRACE(rs_get_method_data);
/* Parameters guaranteed valid by caller */
@@ -634,9 +644,9 @@ acpi_rs_get_method_data(acpi_handle handle,
*
* FUNCTION: acpi_rs_set_srs_method_data
*
- * PARAMETERS: Handle - a handle to the containing object
- * in_buffer - a pointer to a buffer structure of the
- * parameter
+ * PARAMETERS: Node - Device node
+ * in_buffer - Pointer to a buffer structure of the
+ * parameter
*
* RETURN: Status
*
@@ -646,23 +656,37 @@ acpi_rs_get_method_data(acpi_handle handle,
* If the function fails an appropriate status will be returned
* and the contents of the callers buffer is undefined.
*
+ * Note: Parameters guaranteed valid by caller
+ *
******************************************************************************/
acpi_status
-acpi_rs_set_srs_method_data(acpi_handle handle, struct acpi_buffer *in_buffer)
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *in_buffer)
{
- struct acpi_parameter_info info;
- union acpi_operand_object *params[2];
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[2];
acpi_status status;
struct acpi_buffer buffer;
- ACPI_FUNCTION_TRACE("rs_set_srs_method_data");
+ ACPI_FUNCTION_TRACE(rs_set_srs_method_data);
- /* Parameters guaranteed valid by caller */
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = node;
+ info->pathname = METHOD_NAME__SRS;
+ info->parameters = args;
+ info->parameter_type = ACPI_PARAM_ARGS;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
/*
* The in_buffer parameter will point to a linked list of
- * resource parameters. It needs to be formatted into a
+ * resource parameters. It needs to be formatted into a
* byte stream to be sent in as an input parameter to _SRS
*
* Convert the linked list into a byte stream
@@ -670,41 +694,36 @@ acpi_rs_set_srs_method_data(acpi_handle handle, struct acpi_buffer *in_buffer)
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_create_aml_resources(in_buffer->pointer, &buffer);
if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ goto cleanup;
}
- /* Init the param object */
+ /* Create and initialize the method parameter object */
- params[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
- if (!params[0]) {
- acpi_os_free(buffer.pointer);
- return_ACPI_STATUS(AE_NO_MEMORY);
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_BUFFER);
+ if (!args[0]) {
+ /*
+ * Must free the buffer allocated above here; on the success path
+ * it is freed later when the buffer object is deleted
+ */
+ ACPI_FREE(buffer.pointer);
+ status = AE_NO_MEMORY;
+ goto cleanup;
}
- /* Set up the parameter object */
-
- params[0]->buffer.length = (u32) buffer.length;
- params[0]->buffer.pointer = buffer.pointer;
- params[0]->common.flags = AOPOBJ_DATA_VALID;
- params[1] = NULL;
-
- info.node = handle;
- info.parameters = params;
- info.parameter_type = ACPI_PARAM_ARGS;
+ args[0]->buffer.length = (u32) buffer.length;
+ args[0]->buffer.pointer = buffer.pointer;
+ args[0]->common.flags = AOPOBJ_DATA_VALID;
+ args[1] = NULL;
- /* Execute the method, no return value */
+ /* Execute the method, no return value is expected */
- status = acpi_ns_evaluate_relative(METHOD_NAME__SRS, &info);
- if (ACPI_SUCCESS(status)) {
- /* Delete any return object (especially if implicit_return is enabled) */
+ status = acpi_ns_evaluate(info);
- if (info.return_object) {
- acpi_ut_remove_reference(info.return_object);
- }
- }
+ /* Clean up and return the status from acpi_ns_evaluate */
- /* Clean up and return the status from acpi_ns_evaluate_relative */
+ acpi_ut_remove_reference(args[0]);
- acpi_ut_remove_reference(params[0]);
+ cleanup:
+ ACPI_FREE(info);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 88b67077aee..1999e2ab7da 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -41,10 +41,9 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acresrc.h>
+#include <acpi/acnamesp.h>
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsxface")
@@ -68,312 +67,262 @@ ACPI_MODULE_NAME("rsxface")
static acpi_status
acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context);
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+ struct acpi_buffer *buffer,
+ struct acpi_namespace_node **return_node);
+
/*******************************************************************************
*
- * FUNCTION: acpi_get_irq_routing_table
+ * FUNCTION: acpi_rs_validate_parameters
*
- * PARAMETERS: device_handle - a handle to the Bus device we are querying
- * ret_buffer - a pointer to a buffer to receive the
- * current resources for the device
+ * PARAMETERS: device_handle - Handle to a device
+ * Buffer - Pointer to a data buffer
+ * return_node - Pointer to where the device node is returned
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to get the IRQ routing table for a
- * specific bus. The caller must first acquire a handle for the
- * desired bus. The routine table is placed in the buffer pointed
- * to by the ret_buffer variable parameter.
- *
- * If the function fails an appropriate status will be returned
- * and the value of ret_buffer is undefined.
- *
- * This function attempts to execute the _PRT method contained in
- * the object indicated by the passed device_handle.
+ * DESCRIPTION: Common parameter validation for resource interfaces
*
******************************************************************************/
-acpi_status
-acpi_get_irq_routing_table(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer)
+static acpi_status
+acpi_rs_validate_parameters(acpi_handle device_handle,
+ struct acpi_buffer *buffer,
+ struct acpi_namespace_node **return_node)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_get_irq_routing_table ");
+ ACPI_FUNCTION_TRACE(rs_validate_parameters);
/*
- * Must have a valid handle and buffer, So we have to have a handle
- * and a return buffer structure, and if there is a non-zero buffer length
- * we also need a valid pointer in the buffer. If it's a zero buffer length,
- * we'll be returning the needed buffer size, so keep going.
+ * Must have a valid handle to an ACPI device
*/
if (!device_handle) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status = acpi_ut_validate_buffer(ret_buffer);
+ node = acpi_ns_map_handle_to_node(device_handle);
+ if (!node) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (node->type != ACPI_TYPE_DEVICE) {
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /*
+ * Validate the user buffer object
+ *
+ * If there is a non-zero buffer length, we also need a valid pointer in
+ * the buffer. If it's a zero buffer length, we'll be returning the
+ * needed buffer size (later), so keep going.
+ */
+ status = acpi_ut_validate_buffer(buffer);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- status = acpi_rs_get_prt_method_data(device_handle, ret_buffer);
- return_ACPI_STATUS(status);
+ *return_node = node;
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
- * FUNCTION: acpi_get_current_resources
+ * FUNCTION: acpi_get_irq_routing_table
*
- * PARAMETERS: device_handle - a handle to the device object for the
- * device we are querying
- * ret_buffer - a pointer to a buffer to receive the
+ * PARAMETERS: device_handle - Handle to the Bus device we are querying
+ * ret_buffer - Pointer to a buffer to receive the
* current resources for the device
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to get the current resources for a
- * specific device. The caller must first acquire a handle for
- * the desired device. The resource data is placed in the buffer
- * pointed to by the ret_buffer variable parameter.
+ * DESCRIPTION: This function is called to get the IRQ routing table for a
+ * specific bus. The caller must first acquire a handle for the
+ * desired bus. The routine table is placed in the buffer pointed
+ * to by the ret_buffer variable parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
- * This function attempts to execute the _CRS method contained in
+ * This function attempts to execute the _PRT method contained in
* the object indicated by the passed device_handle.
*
******************************************************************************/
acpi_status
-acpi_get_current_resources(acpi_handle device_handle,
+acpi_get_irq_routing_table(acpi_handle device_handle,
struct acpi_buffer *ret_buffer)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_get_current_resources");
+ ACPI_FUNCTION_TRACE(acpi_get_irq_routing_table);
- /*
- * Must have a valid handle and buffer, So we have to have a handle
- * and a return buffer structure, and if there is a non-zero buffer length
- * we also need a valid pointer in the buffer. If it's a zero buffer length,
- * we'll be returning the needed buffer size, so keep going.
- */
- if (!device_handle) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ /* Validate parameters then dispatch to internal routine */
- status = acpi_ut_validate_buffer(ret_buffer);
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- status = acpi_rs_get_crs_method_data(device_handle, ret_buffer);
+ status = acpi_rs_get_prt_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_current_resources);
+ACPI_EXPORT_SYMBOL(acpi_get_irq_routing_table)
/*******************************************************************************
*
- * FUNCTION: acpi_get_possible_resources
+ * FUNCTION: acpi_get_current_resources
*
- * PARAMETERS: device_handle - a handle to the device object for the
+ * PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * ret_buffer - a pointer to a buffer to receive the
- * resources for the device
+ * ret_buffer - Pointer to a buffer to receive the
+ * current resources for the device
*
* RETURN: Status
*
- * DESCRIPTION: This function is called to get a list of the possible resources
- * for a specific device. The caller must first acquire a handle
- * for the desired device. The resource data is placed in the
- * buffer pointed to by the ret_buffer variable.
+ * DESCRIPTION: This function is called to get the current resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is placed in the buffer
+ * pointed to by the ret_buffer variable parameter.
*
* If the function fails an appropriate status will be returned
* and the value of ret_buffer is undefined.
*
+ * This function attempts to execute the _CRS method contained in
+ * the object indicated by the passed device_handle.
+ *
******************************************************************************/
-
-#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_get_possible_resources(acpi_handle device_handle,
- struct acpi_buffer *ret_buffer)
+acpi_get_current_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_get_possible_resources");
+ ACPI_FUNCTION_TRACE(acpi_get_current_resources);
- /*
- * Must have a valid handle and buffer, So we have to have a handle
- * and a return buffer structure, and if there is a non-zero buffer length
- * we also need a valid pointer in the buffer. If it's a zero buffer length,
- * we'll be returning the needed buffer size, so keep going.
- */
- if (!device_handle) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ /* Validate parameters then dispatch to internal routine */
- status = acpi_ut_validate_buffer(ret_buffer);
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- status = acpi_rs_get_prs_method_data(device_handle, ret_buffer);
+ status = acpi_rs_get_crs_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_possible_resources);
-#endif /* ACPI_FUTURE_USAGE */
+ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
+#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
- * FUNCTION: acpi_walk_resources
+ * FUNCTION: acpi_get_possible_resources
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
- * user_function - Called for each resource
- * Context - Passed to user_function
+ * ret_buffer - Pointer to a buffer to receive the
+ * resources for the device
*
* RETURN: Status
*
- * DESCRIPTION: Retrieves the current or possible resource list for the
- * specified device. The user_function is called once for
- * each resource in the list.
+ * DESCRIPTION: This function is called to get a list of the possible resources
+ * for a specific device. The caller must first acquire a handle
+ * for the desired device. The resource data is placed in the
+ * buffer pointed to by the ret_buffer variable.
+ *
+ * If the function fails an appropriate status will be returned
+ * and the value of ret_buffer is undefined.
*
******************************************************************************/
-
acpi_status
-acpi_walk_resources(acpi_handle device_handle,
- char *name,
- ACPI_WALK_RESOURCE_CALLBACK user_function, void *context)
+acpi_get_possible_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
{
acpi_status status;
- struct acpi_buffer buffer;
- struct acpi_resource *resource;
- struct acpi_resource *resource_end;
-
- ACPI_FUNCTION_TRACE("acpi_walk_resources");
-
- /* Parameter validation */
+ struct acpi_namespace_node *node;
- if (!device_handle || !user_function || !name ||
- (ACPI_STRNCMP(name, METHOD_NAME__CRS, sizeof(METHOD_NAME__CRS)) &&
- ACPI_STRNCMP(name, METHOD_NAME__PRS, sizeof(METHOD_NAME__PRS)))) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
+ ACPI_FUNCTION_TRACE(acpi_get_possible_resources);
- /* Get the _CRS or _PRS resource list */
+ /* Validate parameters then dispatch to internal routine */
- buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
- status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- /* Buffer now contains the resource list */
-
- resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
- resource_end =
- ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
-
- /* Walk the resource list until the end_tag is found (or buffer end) */
-
- while (resource < resource_end) {
- /* Sanity check the resource */
-
- if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
- status = AE_AML_INVALID_RESOURCE_TYPE;
- break;
- }
-
- /* Invoke the user function, abort on any error returned */
-
- status = user_function(resource, context);
- if (ACPI_FAILURE(status)) {
- if (status == AE_CTRL_TERMINATE) {
- /* This is an OK termination by the user function */
-
- status = AE_OK;
- }
- break;
- }
-
- /* end_tag indicates end-of-list */
-
- if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
- break;
- }
-
- /* Get the next resource descriptor */
-
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
- }
-
- ACPI_MEM_FREE(buffer.pointer);
+ status = acpi_rs_get_prs_method_data(node, ret_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_walk_resources);
+ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
+#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_set_current_resources
*
- * PARAMETERS: device_handle - a handle to the device object for the
- * device we are changing the resources of
- * in_buffer - a pointer to a buffer containing the
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device for which we are setting resources
+ * in_buffer - Pointer to a buffer containing the
* resources to be set for the device
*
* RETURN: Status
*
* DESCRIPTION: This function is called to set the current resources for a
- * specific device. The caller must first acquire a handle for
- * the desired device. The resource data is passed to the routine
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is passed to the routine
* the buffer pointed to by the in_buffer variable.
*
******************************************************************************/
-
acpi_status
acpi_set_current_resources(acpi_handle device_handle,
struct acpi_buffer *in_buffer)
{
acpi_status status;
+ struct acpi_namespace_node *node;
- ACPI_FUNCTION_TRACE("acpi_set_current_resources");
+ ACPI_FUNCTION_TRACE(acpi_set_current_resources);
- /* Must have a valid handle and buffer */
+ /* Validate the buffer, don't allow zero length */
- if ((!device_handle) ||
- (!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
+ if ((!in_buffer) || (!in_buffer->pointer) || (!in_buffer->length)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status = acpi_rs_set_srs_method_data(device_handle, in_buffer);
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, in_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_set_srs_method_data(node, in_buffer);
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_set_current_resources);
+ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
*
- * PARAMETERS: Resource - Pointer to a resource
- * Out - Pointer to the users's return
- * buffer (a struct
- * struct acpi_resource_address64)
+ * PARAMETERS: Resource - Pointer to a resource
+ * Out - Pointer to the user's return buffer
+ * (a struct acpi_resource_address64)
*
* RETURN: Status
*
* DESCRIPTION: If the resource is an address16, address32, or address64,
- * copy it to the address64 return buffer. This saves the
+ * copy it to the address64 return buffer. This saves the
* caller from having to duplicate code for different-sized
* addresses.
*
******************************************************************************/
-
acpi_status
acpi_resource_to_address64(struct acpi_resource *resource,
struct acpi_resource_address64 *out)
@@ -415,18 +364,18 @@ acpi_resource_to_address64(struct acpi_resource *resource,
return (AE_OK);
}
-EXPORT_SYMBOL(acpi_resource_to_address64);
+ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
/*******************************************************************************
*
* FUNCTION: acpi_get_vendor_resource
*
- * PARAMETERS: device_handle - Handle for the parent device object
- * Name - Method name for the parent resource
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
- * Uuid - Pointer to the UUID to be matched.
- * includes both subtype and 16-byte UUID
- * ret_buffer - Where the vendor resource is returned
+ * PARAMETERS: device_handle - Handle for the parent device object
+ * Name - Method name for the parent resource
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * Uuid - Pointer to the UUID to be matched.
+ * Includes both subtype and 16-byte UUID
+ * ret_buffer - Where the vendor resource is returned
*
* RETURN: Status
*
@@ -435,7 +384,6 @@ EXPORT_SYMBOL(acpi_resource_to_address64);
* UUID subtype. Returns a struct acpi_resource of type Vendor.
*
******************************************************************************/
-
acpi_status
acpi_get_vendor_resource(acpi_handle device_handle,
char *name,
@@ -467,18 +415,19 @@ acpi_get_vendor_resource(acpi_handle device_handle,
return (info.status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_vendor_resource)
+
/*******************************************************************************
*
* FUNCTION: acpi_rs_match_vendor_resource
*
- * PARAMETERS: ACPI_WALK_RESOURCE_CALLBACK
+ * PARAMETERS: acpi_walk_resource_callback
*
* RETURN: Status
*
* DESCRIPTION: Match a vendor resource via the ACPI 3.0 UUID
*
******************************************************************************/
-
static acpi_status
acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
{
@@ -526,3 +475,101 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
info->status = AE_OK;
return (AE_CTRL_TERMINATE);
}
+
+ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_walk_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are querying
+ * Name - Method name of the resources we want
+ * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * user_function - Called for each resource
+ * Context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Retrieves the current or possible resource list for the
+ * specified device. The user_function is called once for
+ * each resource in the list.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_walk_resources(acpi_handle device_handle,
+ char *name,
+ acpi_walk_resource_callback user_function, void *context)
+{
+ acpi_status status;
+ struct acpi_buffer buffer;
+ struct acpi_resource *resource;
+ struct acpi_resource *resource_end;
+
+ ACPI_FUNCTION_TRACE(acpi_walk_resources);
+
+ /* Parameter validation */
+
+ if (!device_handle || !user_function || !name ||
+ (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the _CRS or _PRS resource list */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_rs_get_method_data(device_handle, name, &buffer);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Buffer now contains the resource list */
+
+ resource = ACPI_CAST_PTR(struct acpi_resource, buffer.pointer);
+ resource_end =
+ ACPI_ADD_PTR(struct acpi_resource, buffer.pointer, buffer.length);
+
+ /* Walk the resource list until the end_tag is found (or buffer end) */
+
+ while (resource < resource_end) {
+
+ /* Sanity check the resource */
+
+ if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
+ status = AE_AML_INVALID_RESOURCE_TYPE;
+ break;
+ }
+
+ /* Invoke the user function, abort on any error returned */
+
+ status = user_function(resource, context);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
+
+ /* This is an OK termination by the user function */
+
+ status = AE_OK;
+ }
+ break;
+ }
+
+ /* end_tag indicates end-of-list */
+
+ if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) {
+ break;
+ }
+
+ /* Get the next resource descriptor */
+
+ resource =
+ ACPI_ADD_PTR(struct acpi_resource, resource,
+ resource->length);
+ }
+
+ ACPI_FREE(buffer.pointer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_walk_resources)
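
acpi_walk_resources() itself is unchanged apart from the ACPI_COMPARE_NAME and ACPI_FREE substitutions and the blank-line tweaks; it has simply been moved below the vendor-resource helpers and is now exported through ACPI_EXPORT_SYMBOL. A caller-side sketch of how the walk is typically used -- the device handle, context struct, and resource-type filter below are illustrative, not taken from this patch:

	/* Count the IRQ descriptors in a device's _CRS (hypothetical helper) */
	struct irq_count {
		int irqs;
	};

	static acpi_status count_irqs(struct acpi_resource *resource, void *context)
	{
		struct irq_count *count = context;

		if (resource->type == ACPI_RESOURCE_TYPE_IRQ ||
		    resource->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ)
			count->irqs++;

		/* AE_OK keeps walking; AE_CTRL_TERMINATE would stop early */
		return AE_OK;
	}

	static int count_device_irqs(acpi_handle device_handle)
	{
		struct irq_count count = { 0 };

		if (ACPI_FAILURE(acpi_walk_resources(device_handle,
						     METHOD_NAME__CRS,
						     count_irqs, &count)))
			return -1;

		return count.irqs;
	}
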
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a0ab828b2cc..f8316a05ede 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -142,7 +142,7 @@ static void acpi_device_register(struct acpi_device *device,
create_sysfs_device_files(device);
}
-static int acpi_device_unregister(struct acpi_device *device, int type)
+static void acpi_device_unregister(struct acpi_device *device, int type)
{
spin_lock(&acpi_device_lock);
if (device->parent) {
@@ -158,7 +158,6 @@ static int acpi_device_unregister(struct acpi_device *device, int type)
acpi_detach_data(device->handle, acpi_bus_data_handler);
remove_sysfs_device_files(device);
kobject_unregister(&device->kobj);
- return 0;
}
void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
@@ -234,12 +233,9 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
int acpi_match_ids(struct acpi_device *device, char *ids)
{
- int error = 0;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
if (device->flags.hardware_id)
if (strstr(ids, device->pnp.hardware_id))
- goto Done;
+ return 0;
if (device->flags.compatible_ids) {
struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
@@ -248,15 +244,10 @@ int acpi_match_ids(struct acpi_device *device, char *ids)
/* compare multiple _CID entries against driver ids */
for (i = 0; i < cid_list->count; i++) {
if (strstr(ids, cid_list->id[i].value))
- goto Done;
+ return 0;
}
}
- error = -ENOENT;
-
- Done:
- if (buffer.pointer)
- acpi_os_free(buffer.pointer);
- return error;
+ return -ENOENT;
}
static acpi_status
@@ -441,10 +432,7 @@ acpi_eject_store(struct acpi_device *device, const char *buf, size_t count)
islockable = device->flags.lockable;
handle = device->handle;
- if (type == ACPI_TYPE_PROCESSOR)
- result = acpi_bus_trim(device, 0);
- else
- result = acpi_bus_trim(device, 1);
+ result = acpi_bus_trim(device, 1);
if (!result)
result = acpi_eject_operation(handle, islockable);
@@ -471,7 +459,6 @@ static int acpi_bus_get_perf_flags(struct acpi_device *device)
-------------------------------------------------------------------------- */
static LIST_HEAD(acpi_bus_drivers);
-static DECLARE_MUTEX(acpi_bus_drivers_lock);
/**
* acpi_bus_match - match device IDs to driver's supported IDs
@@ -548,10 +535,9 @@ static int acpi_start_single_object(struct acpi_device *device)
return_VALUE(result);
}
-static int acpi_driver_attach(struct acpi_driver *drv)
+static void acpi_driver_attach(struct acpi_driver *drv)
{
struct list_head *node, *next;
- int count = 0;
ACPI_FUNCTION_TRACE("acpi_driver_attach");
@@ -568,7 +554,6 @@ static int acpi_driver_attach(struct acpi_driver *drv)
if (!acpi_bus_driver_init(dev, drv)) {
acpi_start_single_object(dev);
atomic_inc(&drv->references);
- count++;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Found driver [%s] for device [%s]\n",
drv->name, dev->pnp.bus_id));
@@ -577,10 +562,9 @@ static int acpi_driver_attach(struct acpi_driver *drv)
spin_lock(&acpi_device_lock);
}
spin_unlock(&acpi_device_lock);
- return_VALUE(count);
}
-static int acpi_driver_detach(struct acpi_driver *drv)
+static void acpi_driver_detach(struct acpi_driver *drv)
{
struct list_head *node, *next;
@@ -602,7 +586,6 @@ static int acpi_driver_detach(struct acpi_driver *drv)
}
}
spin_unlock(&acpi_device_lock);
- return_VALUE(0);
}
/**
@@ -610,28 +593,22 @@ static int acpi_driver_detach(struct acpi_driver *drv)
* @driver: driver being registered
*
* Registers a driver with the ACPI bus. Searches the namespace for all
- * devices that match the driver's criteria and binds. Returns the
- * number of devices that were claimed by the driver, or a negative
- * error status for failure.
+ * devices that match the driver's criteria and binds. Returns zero for
+ * success or a negative error status for failure.
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
- int count;
-
ACPI_FUNCTION_TRACE("acpi_bus_register_driver");
if (acpi_disabled)
return_VALUE(-ENODEV);
- if (!driver)
- return_VALUE(-EINVAL);
-
spin_lock(&acpi_device_lock);
list_add_tail(&driver->node, &acpi_bus_drivers);
spin_unlock(&acpi_device_lock);
- count = acpi_driver_attach(driver);
+ acpi_driver_attach(driver);
- return_VALUE(count);
+ return_VALUE(0);
}
EXPORT_SYMBOL(acpi_bus_register_driver);
@@ -643,23 +620,16 @@ EXPORT_SYMBOL(acpi_bus_register_driver);
* Unregisters a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and unbinds.
*/
-int acpi_bus_unregister_driver(struct acpi_driver *driver)
+void acpi_bus_unregister_driver(struct acpi_driver *driver)
{
- int error = 0;
+ acpi_driver_detach(driver);
- ACPI_FUNCTION_TRACE("acpi_bus_unregister_driver");
-
- if (driver) {
- acpi_driver_detach(driver);
-
- if (!atomic_read(&driver->references)) {
- spin_lock(&acpi_device_lock);
- list_del_init(&driver->node);
- spin_unlock(&acpi_device_lock);
- }
- } else
- error = -EINVAL;
- return_VALUE(error);
+ if (!atomic_read(&driver->references)) {
+ spin_lock(&acpi_device_lock);
+ list_del_init(&driver->node);
+ spin_unlock(&acpi_device_lock);
+ }
+ return;
}
EXPORT_SYMBOL(acpi_bus_unregister_driver);
@@ -1371,6 +1341,100 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
return_VALUE(result);
}
+
+static inline struct acpi_device * to_acpi_dev(struct device * dev)
+{
+ return container_of(dev, struct acpi_device, dev);
+}
+
+
+static int root_suspend(struct acpi_device * acpi_dev, pm_message_t state)
+{
+ struct acpi_device * dev, * next;
+ int result;
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_entry_safe_reverse(dev, next, &acpi_device_list, g_list) {
+ if (dev->driver && dev->driver->ops.suspend) {
+ spin_unlock(&acpi_device_lock);
+ result = dev->driver->ops.suspend(dev, 0);
+ if (result) {
+ printk(KERN_ERR PREFIX "[%s - %s] Suspend failed: %d\n",
+ acpi_device_name(dev),
+ acpi_device_bid(dev), result);
+ }
+ spin_lock(&acpi_device_lock);
+ }
+ }
+ spin_unlock(&acpi_device_lock);
+ return 0;
+}
+
+
+static int acpi_device_suspend(struct device * dev, pm_message_t state)
+{
+ struct acpi_device * acpi_dev = to_acpi_dev(dev);
+
+ /*
+ * For now, we should only register 1 generic device -
+ * the ACPI root device - and from there, we walk the
+ * tree of ACPI devices to suspend each one using the
+ * ACPI driver methods.
+ */
+ if (acpi_dev->handle == ACPI_ROOT_OBJECT)
+ root_suspend(acpi_dev, state);
+ return 0;
+}
+
+
+
+static int root_resume(struct acpi_device * acpi_dev)
+{
+ struct acpi_device * dev, * next;
+ int result;
+
+ spin_lock(&acpi_device_lock);
+ list_for_each_entry_safe(dev, next, &acpi_device_list, g_list) {
+ if (dev->driver && dev->driver->ops.resume) {
+ spin_unlock(&acpi_device_lock);
+ result = dev->driver->ops.resume(dev, 0);
+ if (result) {
+ printk(KERN_ERR PREFIX "[%s - %s] resume failed: %d\n",
+ acpi_device_name(dev),
+ acpi_device_bid(dev), result);
+ }
+ spin_lock(&acpi_device_lock);
+ }
+ }
+ spin_unlock(&acpi_device_lock);
+ return 0;
+}
+
+
+static int acpi_device_resume(struct device * dev)
+{
+ struct acpi_device * acpi_dev = to_acpi_dev(dev);
+
+ /*
+ * For now, we should only register 1 generic device -
+ * the ACPI root device - and from there, we walk the
+ * tree of ACPI devices to resume each one using the
+ * ACPI driver methods.
+ */
+ if (acpi_dev->handle == ACPI_ROOT_OBJECT)
+ root_resume(acpi_dev);
+ return 0;
+}
+
+
+struct bus_type acpi_bus_type = {
+ .name = "acpi",
+ .suspend = acpi_device_suspend,
+ .resume = acpi_device_resume,
+};
+
+
+
static int __init acpi_scan_init(void)
{
int result;
@@ -1383,6 +1447,12 @@ static int __init acpi_scan_init(void)
kset_register(&acpi_namespace_kset);
+ result = bus_register(&acpi_bus_type);
+ if (result) {
+ /* We don't want to quit even if we failed to add suspend/resume */
+ printk(KERN_ERR PREFIX "Could not register bus type\n");
+ }
+
/*
* Create the root device in the bus's device tree
*/
@@ -1392,6 +1462,16 @@ static int __init acpi_scan_init(void)
goto Done;
result = acpi_start_single_object(acpi_root);
+ if (result)
+ goto Done;
+
+ acpi_root->dev.bus = &acpi_bus_type;
+ snprintf(acpi_root->dev.bus_id, BUS_ID_SIZE, "%s", acpi_bus_type.name);
+ result = device_register(&acpi_root->dev);
+ if (result) {
+ /* We don't want to quit even if we failed to add suspend/resume */
+ printk(KERN_ERR PREFIX "Could not register device\n");
+ }
/*
* Enumerate devices in the ACPI namespace.
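With acpi_bus_type registered and the root device added above, an individual ACPI driver opts into this suspend/resume walk simply by filling in the .suspend and .resume members of its acpi_driver ops, as the thermal driver does later in this patch. A minimal sketch, with hypothetical names and callback signatures mirroring acpi_thermal_resume() below:

    static int my_acpi_add(struct acpi_device *device)
    {
            /* allocate and attach driver data here */
            return 0;
    }

    static int my_acpi_remove(struct acpi_device *device, int type)
    {
            /* release driver data here */
            return 0;
    }

    static int my_acpi_suspend(struct acpi_device *device, int state)
    {
            /* quiesce the hardware; invoked from root_suspend() while it
             * walks acpi_device_list in reverse */
            return 0;
    }

    static int my_acpi_resume(struct acpi_device *device, int state)
    {
            /* re-program the hardware after S3, as acpi_thermal_resume() does */
            return 0;
    }

    static struct acpi_driver my_acpi_driver = {
            .name = "my_driver",
            .class = "my_class",
            .ids = "MYDEV01",
            .ops = {
                    .add = my_acpi_add,
                    .remove = my_acpi_remove,
                    .suspend = my_acpi_suspend,
                    .resume = my_acpi_resume,
            },
    };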
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 930427fc0c4..62ce87d7165 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -105,6 +105,14 @@ static int acpi_pm_enter(suspend_state_t pm_state)
default:
return -EINVAL;
}
+
+ /* The ACPI 3.0 spec (p. 62) says that it is the responsibility
+ * of the OSPM to clear the status bit [ implying that the

+ * POWER_BUTTON event should not reach userspace ]
+ */
+ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+
local_irq_restore(flags);
printk(KERN_DEBUG "Back to C!\n");
diff --git a/drivers/acpi/sleep/wakeup.c b/drivers/acpi/sleep/wakeup.c
index 85df0ceda2a..af1dbabaf0b 100644
--- a/drivers/acpi/sleep/wakeup.c
+++ b/drivers/acpi/sleep/wakeup.c
@@ -155,7 +155,6 @@ static int __init acpi_wakeup_device_init(void)
if (acpi_disabled)
return 0;
- printk("ACPI wakeup devices: \n");
spin_lock(&acpi_device_lock);
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
@@ -174,10 +173,8 @@ static int __init acpi_wakeup_device_init(void)
dev->wakeup.state.enabled = 1;
spin_lock(&acpi_device_lock);
}
- printk("%4s ", dev->pnp.bus_id);
}
spin_unlock(&acpi_device_lock);
- printk("\n");
return 0;
}
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index e4308c7a674..a934ac42178 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -39,7 +39,7 @@ ACPI_MODULE_NAME("acpi_system")
#define ACPI_SYSTEM_FILE_EVENT "event"
#define ACPI_SYSTEM_FILE_DSDT "dsdt"
#define ACPI_SYSTEM_FILE_FADT "fadt"
-extern FADT_DESCRIPTOR acpi_fadt;
+extern struct fadt_descriptor acpi_fadt;
/* --------------------------------------------------------------------------
FS Interface (/proc)
@@ -82,7 +82,7 @@ acpi_system_read_dsdt(struct file *file,
ACPI_FUNCTION_TRACE("acpi_system_read_dsdt");
- status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
+ status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
@@ -110,7 +110,7 @@ acpi_system_read_fadt(struct file *file,
ACPI_FUNCTION_TRACE("acpi_system_read_fadt");
- status = acpi_get_table(ACPI_TABLE_FADT, 1, &fadt);
+ status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &fadt);
if (ACPI_FAILURE(status))
return_VALUE(-ENODEV);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 7f37c7cc5ef..ed5e8816d83 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -282,8 +282,8 @@ acpi_get_table_header_early(enum acpi_table_id id,
/* Map the DSDT header via the pointer in the FADT */
if (id == ACPI_DSDT) {
- struct fadt_descriptor_rev2 *fadt =
- (struct fadt_descriptor_rev2 *)*header;
+ struct fadt_descriptor *fadt =
+ (struct fadt_descriptor *)*header;
if (fadt->revision == 3 && fadt->Xdsdt) {
*header = (void *)__acpi_map_table(fadt->Xdsdt,
diff --git a/drivers/acpi/tables/tbconvrt.c b/drivers/acpi/tables/tbconvrt.c
index 03b37d2223b..d697fcb35d5 100644
--- a/drivers/acpi/tables/tbconvrt.c
+++ b/drivers/acpi/tables/tbconvrt.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/actables.h>
@@ -56,15 +54,15 @@ acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
acpi_physical_address address);
static void
-acpi_tb_convert_fadt1(struct fadt_descriptor_rev2 *local_fadt,
+acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
struct fadt_descriptor_rev1 *original_fadt);
static void
-acpi_tb_convert_fadt2(struct fadt_descriptor_rev2 *local_fadt,
- struct fadt_descriptor_rev2 *original_fadt);
+acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
+ struct fadt_descriptor *original_fadt);
u8 acpi_fadt_is_v1;
-EXPORT_SYMBOL(acpi_fadt_is_v1);
+ACPI_EXPORT_SYMBOL(acpi_fadt_is_v1)
/*******************************************************************************
*
@@ -122,7 +120,7 @@ acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
{
acpi_size table_size;
u32 i;
- XSDT_DESCRIPTOR *new_table;
+ struct xsdt_descriptor *new_table;
ACPI_FUNCTION_ENTRY();
@@ -133,7 +131,7 @@ acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
/* Allocate an XSDT */
- new_table = ACPI_MEM_CALLOCATE(table_size);
+ new_table = ACPI_ALLOCATE_ZEROED(table_size);
if (!new_table) {
return (AE_NO_MEMORY);
}
@@ -147,17 +145,18 @@ acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
/* Copy the table pointers */
for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
+
/* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
ACPI_STORE_ADDRESS(new_table->table_offset_entry[i],
(ACPI_CAST_PTR
- (struct rsdt_descriptor_rev1,
+ (struct rsdt_descriptor,
table_info->pointer))->
table_offset_entry[i]);
} else {
new_table->table_offset_entry[i] =
- (ACPI_CAST_PTR(XSDT_DESCRIPTOR,
+ (ACPI_CAST_PTR(struct xsdt_descriptor,
table_info->pointer))->
table_offset_entry[i];
}
@@ -219,7 +218,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
******************************************************************************/
static void
-acpi_tb_convert_fadt1(struct fadt_descriptor_rev2 *local_fadt,
+acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
struct fadt_descriptor_rev1 *original_fadt)
{
@@ -365,14 +364,13 @@ acpi_tb_convert_fadt1(struct fadt_descriptor_rev2 *local_fadt,
******************************************************************************/
static void
-acpi_tb_convert_fadt2(struct fadt_descriptor_rev2 *local_fadt,
- struct fadt_descriptor_rev2 *original_fadt)
+acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
+ struct fadt_descriptor *original_fadt)
{
/* We have an ACPI 2.0 FADT but we must copy it to our local buffer */
- ACPI_MEMCPY(local_fadt, original_fadt,
- sizeof(struct fadt_descriptor_rev2));
+ ACPI_MEMCPY(local_fadt, original_fadt, sizeof(struct fadt_descriptor));
/*
* "X" fields are optional extensions to the original V1.0 fields, so
@@ -491,10 +489,10 @@ acpi_tb_convert_fadt2(struct fadt_descriptor_rev2 *local_fadt,
acpi_status acpi_tb_convert_table_fadt(void)
{
- struct fadt_descriptor_rev2 *local_fadt;
+ struct fadt_descriptor *local_fadt;
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("tb_convert_table_fadt");
+ ACPI_FUNCTION_TRACE(tb_convert_table_fadt);
/*
* acpi_gbl_FADT is valid. Validate the FADT length. The table must be
@@ -508,13 +506,14 @@ acpi_status acpi_tb_convert_table_fadt(void)
/* Allocate buffer for the ACPI 2.0(+) FADT */
- local_fadt = ACPI_MEM_CALLOCATE(sizeof(struct fadt_descriptor_rev2));
+ local_fadt = ACPI_ALLOCATE_ZEROED(sizeof(struct fadt_descriptor));
if (!local_fadt) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
if (acpi_gbl_FADT->revision >= FADT2_REVISION_ID) {
- if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor_rev2)) {
+ if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor)) {
+
/* Length is too short to be a V2.0 table */
ACPI_WARNING((AE_INFO,
@@ -538,11 +537,11 @@ acpi_status acpi_tb_convert_table_fadt(void)
/* Global FADT pointer will point to the new common V2.0 FADT */
acpi_gbl_FADT = local_fadt;
- acpi_gbl_FADT->length = sizeof(FADT_DESCRIPTOR);
+ acpi_gbl_FADT->length = sizeof(struct fadt_descriptor);
/* Free the original table */
- table_desc = acpi_gbl_table_lists[ACPI_TABLE_FADT].next;
+ table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_FADT].next;
acpi_tb_delete_single_table(table_desc);
/* Install the new table */
@@ -550,7 +549,7 @@ acpi_status acpi_tb_convert_table_fadt(void)
table_desc->pointer =
ACPI_CAST_PTR(struct acpi_table_header, acpi_gbl_FADT);
table_desc->allocation = ACPI_MEM_ALLOCATED;
- table_desc->length = sizeof(struct fadt_descriptor_rev2);
+ table_desc->length = sizeof(struct fadt_descriptor);
/* Dump the entire FADT */
@@ -580,7 +579,7 @@ acpi_status acpi_tb_convert_table_fadt(void)
acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
{
- ACPI_FUNCTION_TRACE("tb_build_common_facs");
+ ACPI_FUNCTION_TRACE(tb_build_common_facs);
/* Absolute minimum length is 24, but the ACPI spec says 64 */
@@ -603,6 +602,7 @@ acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
if ((acpi_gbl_RSDP->revision < 2) ||
(acpi_gbl_FACS->length < 32) ||
(!(acpi_gbl_FACS->xfirmware_waking_vector))) {
+
/* ACPI 1.0 FACS or short table or optional X_ field is zero */
acpi_gbl_common_fACS.firmware_waking_vector = ACPI_CAST_PTR(u64,
diff --git a/drivers/acpi/tables/tbget.c b/drivers/acpi/tables/tbget.c
index 09b4ee6dfd6..99eacceff56 100644
--- a/drivers/acpi/tables/tbget.c
+++ b/drivers/acpi/tables/tbget.c
@@ -78,7 +78,7 @@ acpi_tb_get_table(struct acpi_pointer *address,
acpi_status status;
struct acpi_table_header header;
- ACPI_FUNCTION_TRACE("tb_get_table");
+ ACPI_FUNCTION_TRACE(tb_get_table);
/* Get the header in order to get signature and table size */
@@ -124,7 +124,7 @@ acpi_tb_get_table_header(struct acpi_pointer *address,
acpi_status status = AE_OK;
struct acpi_table_header *header = NULL;
- ACPI_FUNCTION_TRACE("tb_get_table_header");
+ ACPI_FUNCTION_TRACE(tb_get_table_header);
/*
* Flags contains the current processor mode (Virtual or Physical
@@ -148,6 +148,10 @@ acpi_tb_get_table_header(struct acpi_pointer *address,
sizeof(struct acpi_table_header),
(void *)&header);
if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not map memory at %8.8X%8.8X for table header",
+ ACPI_FORMAT_UINT64(address->pointer.
+ physical)));
return_ACPI_STATUS(status);
}
@@ -198,7 +202,7 @@ acpi_tb_get_table_body(struct acpi_pointer *address,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_get_table_body");
+ ACPI_FUNCTION_TRACE(tb_get_table_body);
if (!table_info || !address) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -208,6 +212,7 @@ acpi_tb_get_table_body(struct acpi_pointer *address,
status = acpi_tb_table_override(header, table_info);
if (ACPI_SUCCESS(status)) {
+
/* Table was overridden by the host OS */
return_ACPI_STATUS(status);
@@ -241,7 +246,7 @@ acpi_tb_table_override(struct acpi_table_header *header,
acpi_status status;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("tb_table_override");
+ ACPI_FUNCTION_TRACE(tb_table_override);
/*
* The OSL will examine the header and decide whether to override this
@@ -250,6 +255,7 @@ acpi_tb_table_override(struct acpi_table_header *header,
*/
status = acpi_os_table_override(header, &new_table);
if (ACPI_FAILURE(status)) {
+
/* Some severe error from the OSL, but we basically ignore it */
ACPI_EXCEPTION((AE_INFO, status,
@@ -258,6 +264,7 @@ acpi_tb_table_override(struct acpi_table_header *header,
}
if (!new_table) {
+
/* No table override */
return_ACPI_STATUS(AE_NO_ACPI_TABLES);
@@ -311,7 +318,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
u8 allocation;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("tb_get_this_table");
+ ACPI_FUNCTION_TRACE(tb_get_this_table);
/*
* Flags contains the current processor mode (Virtual or Physical
@@ -323,7 +330,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
/* Pointer matches processor mode, copy the table to a new buffer */
- full_table = ACPI_MEM_ALLOCATE(header->length);
+ full_table = ACPI_ALLOCATE(header->length);
if (!full_table) {
ACPI_ERROR((AE_INFO,
"Could not allocate table memory for [%4.4s] length %X",
@@ -376,11 +383,12 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
* Validate checksum for _most_ tables,
* even the ones whose signature we don't recognize
*/
- if (table_info->type != ACPI_TABLE_FACS) {
+ if (table_info->type != ACPI_TABLE_ID_FACS) {
status = acpi_tb_verify_table_checksum(full_table);
#if (!ACPI_CHECKSUM_ABORT)
if (ACPI_FAILURE(status)) {
+
/* Ignore the error if configuration says so */
status = AE_OK;
@@ -409,7 +417,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
*
* PARAMETERS: table_type - one of the defined table types
* Instance - Which table of this type
- * table_ptr_loc - pointer to location to place the pointer for
+ * return_table - pointer to location to place the pointer for
* return
*
* RETURN: Status
@@ -420,57 +428,34 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
acpi_status
acpi_tb_get_table_ptr(acpi_table_type table_type,
- u32 instance, struct acpi_table_header **table_ptr_loc)
+ u32 instance, struct acpi_table_header **return_table)
{
struct acpi_table_desc *table_desc;
u32 i;
- ACPI_FUNCTION_TRACE("tb_get_table_ptr");
-
- if (!acpi_gbl_DSDT) {
- return_ACPI_STATUS(AE_NO_ACPI_TABLES);
- }
+ ACPI_FUNCTION_TRACE(tb_get_table_ptr);
- if (table_type > ACPI_TABLE_MAX) {
+ if (table_type > ACPI_TABLE_ID_MAX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /*
- * For all table types (Single/Multiple), the first
- * instance is always in the list head.
- */
- if (instance == 1) {
- /* Get the first */
-
- *table_ptr_loc = NULL;
- if (acpi_gbl_table_lists[table_type].next) {
- *table_ptr_loc =
- acpi_gbl_table_lists[table_type].next->pointer;
- }
- return_ACPI_STATUS(AE_OK);
- }
-
- /* Check for instance out of range */
+ /* Check for instance out of range of the current table count */
if (instance > acpi_gbl_table_lists[table_type].count) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Walk the list to get the desired table
- * Since the if (Instance == 1) check above checked for the
- * first table, setting table_desc equal to the .Next member
- * is actually pointing to the second table. Therefore, we
- * need to walk from the 2nd table until we reach the Instance
- * that the user is looking for and return its table pointer.
+ /*
+ * Walk the list to get the desired table
+ * Note: Instance is one-based
*/
table_desc = acpi_gbl_table_lists[table_type].next;
- for (i = 2; i < instance; i++) {
+ for (i = 1; i < instance; i++) {
table_desc = table_desc->next;
}
/* We are now pointing to the requested table's descriptor */
- *table_ptr_loc = table_desc->pointer;
-
+ *return_table = table_desc->pointer;
return_ACPI_STATUS(AE_OK);
}
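The rewritten acpi_tb_get_table_ptr() above relies on Instance being one-based: starting at the list head and advancing (instance - 1) links reaches the requested descriptor, which is why the old special case for instance == 1 could be dropped. A standalone sketch of the same walk over a hypothetical singly linked list (not ACPICA code):

    struct desc {
            struct desc *next;
            void *pointer;
    };

    /* Return the Nth (one-based) descriptor, or NULL if the list is too short. */
    static struct desc *nth_desc(struct desc *head, unsigned int instance)
    {
            struct desc *d = head;
            unsigned int i;

            for (i = 1; i < instance && d; i++)
                    d = d->next;    /* instance == 1 returns the head itself */
            return d;
    }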
diff --git a/drivers/acpi/tables/tbgetall.c b/drivers/acpi/tables/tbgetall.c
index 134e5dce0bc..ad982112e4c 100644
--- a/drivers/acpi/tables/tbgetall.c
+++ b/drivers/acpi/tables/tbgetall.c
@@ -77,7 +77,7 @@ acpi_tb_get_primary_table(struct acpi_pointer *address,
acpi_status status;
struct acpi_table_header header;
- ACPI_FUNCTION_TRACE("tb_get_primary_table");
+ ACPI_FUNCTION_TRACE(tb_get_primary_table);
/* Ignore a NULL address in the RSDT */
@@ -140,7 +140,7 @@ acpi_tb_get_secondary_table(struct acpi_pointer *address,
acpi_status status;
struct acpi_table_header header;
- ACPI_FUNCTION_TRACE_STR("tb_get_secondary_table", signature);
+ ACPI_FUNCTION_TRACE_STR(tb_get_secondary_table, signature);
/* Get the header in order to match the signature */
@@ -151,7 +151,7 @@ acpi_tb_get_secondary_table(struct acpi_pointer *address,
/* Signature must match request */
- if (ACPI_STRNCMP(header.signature, signature, ACPI_NAME_SIZE)) {
+ if (!ACPI_COMPARE_NAME(header.signature, signature)) {
ACPI_ERROR((AE_INFO,
"Incorrect table signature - wanted [%s] found [%4.4s]",
signature, header.signature));
@@ -207,7 +207,7 @@ acpi_status acpi_tb_get_required_tables(void)
struct acpi_table_desc table_info;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("tb_get_required_tables");
+ ACPI_FUNCTION_TRACE(tb_get_required_tables);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%d ACPI tables in RSDT\n",
acpi_gbl_rsdt_table_count));
@@ -223,6 +223,7 @@ acpi_status acpi_tb_get_required_tables(void)
* any SSDTs.
*/
for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
+
/* Get the table address from the common internal XSDT */
address.pointer.value = acpi_gbl_XSDT->table_offset_entry[i];
@@ -305,6 +306,6 @@ acpi_status acpi_tb_get_required_tables(void)
/* Always delete the RSDP mapping, we are done with it */
- acpi_tb_delete_tables_by_type(ACPI_TABLE_RSDP);
+ acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_RSDP);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 7ffd0fddb4e..7ca2df75bb1 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -73,17 +73,18 @@ acpi_tb_match_signature(char *signature,
{
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("tb_match_signature");
+ ACPI_FUNCTION_TRACE(tb_match_signature);
/* Search for a signature match among the known table types */
- for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) {
+ for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
if (!(acpi_gbl_table_data[i].flags & search_type)) {
continue;
}
if (!ACPI_STRNCMP(signature, acpi_gbl_table_data[i].signature,
acpi_gbl_table_data[i].sig_length)) {
+
/* Found a signature match, return index if requested */
if (table_info) {
@@ -122,7 +123,7 @@ acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_install_table");
+ ACPI_FUNCTION_TRACE(tb_install_table);
/* Lock tables while installing */
@@ -187,7 +188,7 @@ acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
struct acpi_table_header *table_header;
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_recognize_table");
+ ACPI_FUNCTION_TRACE(tb_recognize_table);
/* Ensure that we have a valid table pointer */
@@ -218,7 +219,6 @@ acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
/* Return the table type and length via the info struct */
table_info->length = (acpi_size) table_header->length;
-
return_ACPI_STATUS(status);
}
@@ -243,11 +243,11 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
struct acpi_table_desc *table_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE_U32("tb_init_table_descriptor", table_type);
+ ACPI_FUNCTION_TRACE_U32(tb_init_table_descriptor, table_type);
/* Allocate a descriptor for this table */
- table_desc = ACPI_MEM_CALLOCATE(sizeof(struct acpi_table_desc));
+ table_desc = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
if (!table_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -274,7 +274,7 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
* at this location, so return an error.
*/
if (list_head->next) {
- ACPI_MEM_FREE(table_desc);
+ ACPI_FREE(table_desc);
return_ACPI_STATUS(AE_ALREADY_EXISTS);
}
@@ -312,15 +312,14 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
/* Finish initialization of the table descriptor */
+ table_desc->loaded_into_namespace = FALSE;
table_desc->type = (u8) table_type;
table_desc->pointer = table_info->pointer;
table_desc->length = table_info->length;
table_desc->allocation = table_info->allocation;
table_desc->aml_start = (u8 *) (table_desc->pointer + 1),
- table_desc->aml_length = (u32) (table_desc->length -
- (u32) sizeof(struct
- acpi_table_header));
- table_desc->loaded_into_namespace = FALSE;
+ table_desc->aml_length = (u32)
+ (table_desc->length - (u32) sizeof(struct acpi_table_header));
/*
* Set the appropriate global pointer (if there is one) to point to the
@@ -335,7 +334,6 @@ acpi_tb_init_table_descriptor(acpi_table_type table_type,
table_info->owner_id = table_desc->owner_id;
table_info->installed_desc = table_desc;
-
return_ACPI_STATUS(AE_OK);
}
@@ -359,7 +357,7 @@ void acpi_tb_delete_all_tables(void)
* Free memory allocated for ACPI tables
* Memory can either be mapped or allocated
*/
- for (type = 0; type < NUM_ACPI_TABLE_TYPES; type++) {
+ for (type = 0; type < (ACPI_TABLE_ID_MAX + 1); type++) {
acpi_tb_delete_tables_by_type(type);
}
}
@@ -383,9 +381,9 @@ void acpi_tb_delete_tables_by_type(acpi_table_type type)
u32 count;
u32 i;
- ACPI_FUNCTION_TRACE_U32("tb_delete_tables_by_type", type);
+ ACPI_FUNCTION_TRACE_U32(tb_delete_tables_by_type, type);
- if (type > ACPI_TABLE_MAX) {
+ if (type > ACPI_TABLE_ID_MAX) {
return_VOID;
}
@@ -396,28 +394,28 @@ void acpi_tb_delete_tables_by_type(acpi_table_type type)
/* Clear the appropriate "typed" global table pointer */
switch (type) {
- case ACPI_TABLE_RSDP:
+ case ACPI_TABLE_ID_RSDP:
acpi_gbl_RSDP = NULL;
break;
- case ACPI_TABLE_DSDT:
+ case ACPI_TABLE_ID_DSDT:
acpi_gbl_DSDT = NULL;
break;
- case ACPI_TABLE_FADT:
+ case ACPI_TABLE_ID_FADT:
acpi_gbl_FADT = NULL;
break;
- case ACPI_TABLE_FACS:
+ case ACPI_TABLE_ID_FACS:
acpi_gbl_FACS = NULL;
break;
- case ACPI_TABLE_XSDT:
+ case ACPI_TABLE_ID_XSDT:
acpi_gbl_XSDT = NULL;
break;
- case ACPI_TABLE_SSDT:
- case ACPI_TABLE_PSDT:
+ case ACPI_TABLE_ID_SSDT:
+ case ACPI_TABLE_ID_PSDT:
default:
break;
}
@@ -471,7 +469,7 @@ void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc)
case ACPI_MEM_ALLOCATED:
- ACPI_MEM_FREE(table_desc->pointer);
+ ACPI_FREE(table_desc->pointer);
break;
case ACPI_MEM_MAPPED:
@@ -503,7 +501,7 @@ struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc
{
struct acpi_table_desc *next_desc;
- ACPI_FUNCTION_TRACE_PTR("tb_uninstall_table", table_desc);
+ ACPI_FUNCTION_TRACE_PTR(tb_uninstall_table, table_desc);
if (!table_desc) {
return_PTR(NULL);
@@ -530,7 +528,7 @@ struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc
/* Free the table descriptor */
next_desc = table_desc->next;
- ACPI_MEM_FREE(table_desc);
+ ACPI_FREE(table_desc);
/* Return pointer to the next descriptor */
diff --git a/drivers/acpi/tables/tbrsdt.c b/drivers/acpi/tables/tbrsdt.c
index 4d308220225..abcb08c2592 100644
--- a/drivers/acpi/tables/tbrsdt.c
+++ b/drivers/acpi/tables/tbrsdt.c
@@ -64,7 +64,7 @@ acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
acpi_status status;
struct rsdp_descriptor *rsdp;
- ACPI_FUNCTION_TRACE("tb_verify_rsdp");
+ ACPI_FUNCTION_TRACE(tb_verify_rsdp);
switch (address->pointer_type) {
case ACPI_LOGICAL_POINTER:
@@ -78,7 +78,7 @@ acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
*/
status = acpi_os_map_memory(address->pointer.physical,
sizeof(struct rsdp_descriptor),
- (void *)&rsdp);
+ ACPI_CAST_PTR(void, &rsdp));
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
@@ -95,15 +95,20 @@ acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
goto cleanup;
}
- /* The RSDP supplied is OK */
+ /* RSDP is ok. Init the table info */
table_info.pointer = ACPI_CAST_PTR(struct acpi_table_header, rsdp);
table_info.length = sizeof(struct rsdp_descriptor);
- table_info.allocation = ACPI_MEM_MAPPED;
+
+ if (address->pointer_type == ACPI_PHYSICAL_POINTER) {
+ table_info.allocation = ACPI_MEM_MAPPED;
+ } else {
+ table_info.allocation = ACPI_MEM_NOT_ALLOCATED;
+ }
/* Save the table pointers and allocation info */
- status = acpi_tb_init_table_descriptor(ACPI_TABLE_RSDP, &table_info);
+ status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_RSDP, &table_info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -174,22 +179,20 @@ void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address)
acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
{
- int no_match;
+ char *signature;
ACPI_FUNCTION_ENTRY();
- /*
- * Search for appropriate signature, RSDT or XSDT
- */
+ /* Search for appropriate signature, RSDT or XSDT */
+
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
- no_match = ACPI_STRNCMP((char *)table_ptr, RSDT_SIG,
- sizeof(RSDT_SIG) - 1);
+ signature = RSDT_SIG;
} else {
- no_match = ACPI_STRNCMP((char *)table_ptr, XSDT_SIG,
- sizeof(XSDT_SIG) - 1);
+ signature = XSDT_SIG;
}
- if (no_match) {
+ if (!ACPI_COMPARE_NAME(table_ptr->signature, signature)) {
+
/* Invalid RSDT or XSDT signature */
ACPI_ERROR((AE_INFO,
@@ -198,10 +201,8 @@ acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
ACPI_DUMP_BUFFER(acpi_gbl_RSDP, 20);
ACPI_ERROR((AE_INFO,
- "RSDT/XSDT signature at %X (%p) is invalid",
- acpi_gbl_RSDP->rsdt_physical_address,
- (void *)(acpi_native_uint) acpi_gbl_RSDP->
- rsdt_physical_address));
+ "RSDT/XSDT signature at %X is invalid",
+ acpi_gbl_RSDP->rsdt_physical_address));
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
ACPI_ERROR((AE_INFO, "Looking for RSDT"));
@@ -234,13 +235,13 @@ acpi_status acpi_tb_get_table_rsdt(void)
acpi_status status;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("tb_get_table_rsdt");
+ ACPI_FUNCTION_TRACE(tb_get_table_rsdt);
/* Get the RSDT/XSDT via the RSDP */
acpi_tb_get_rsdt_address(&address);
- table_info.type = ACPI_TABLE_XSDT;
+ table_info.type = ACPI_TABLE_ID_XSDT;
status = acpi_tb_get_table(&address, &table_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
@@ -274,12 +275,13 @@ acpi_status acpi_tb_get_table_rsdt(void)
/* Save the table pointers and allocation info */
- status = acpi_tb_init_table_descriptor(ACPI_TABLE_XSDT, &table_info);
+ status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_XSDT, &table_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- acpi_gbl_XSDT = ACPI_CAST_PTR(XSDT_DESCRIPTOR, table_info.pointer);
+ acpi_gbl_XSDT =
+ ACPI_CAST_PTR(struct xsdt_descriptor, table_info.pointer);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index bc571592f08..209a401801e 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -71,7 +71,7 @@ acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc)
{
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("tb_is_table_installed");
+ ACPI_FUNCTION_TRACE(tb_is_table_installed);
/* Get the list descriptor and first table descriptor */
@@ -96,10 +96,11 @@ acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc)
(!ACPI_MEMCMP
(table_desc->pointer, new_table_desc->pointer,
new_table_desc->pointer->length))) {
+
/* Match: this table is already installed */
ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
- "Table [%4.4s] already installed: Rev %X oem_table_id [%8.8s]\n",
+ "Table [%4.4s] already installed: Rev %X OemTableId [%8.8s]\n",
new_table_desc->pointer->signature,
new_table_desc->pointer->revision,
new_table_desc->pointer->
@@ -159,12 +160,8 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
ACPI_MOVE_32_TO_32(&signature, table_header->signature);
if (!acpi_ut_valid_acpi_name(signature)) {
- ACPI_ERROR((AE_INFO,
- "Table signature at %p [%p] has invalid characters",
- table_header, &signature));
-
- ACPI_WARNING((AE_INFO, "Invalid table signature found: [%4.4s]",
- ACPI_CAST_PTR(char, &signature)));
+ ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
+ signature));
ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header));
@@ -175,12 +172,9 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
if (table_header->length < sizeof(struct acpi_table_header)) {
ACPI_ERROR((AE_INFO,
- "Invalid length in table header %p name %4.4s",
- table_header, (char *)&signature));
-
- ACPI_WARNING((AE_INFO,
- "Invalid table header length (0x%X) found",
- (u32) table_header->length));
+ "Invalid length 0x%X in table with signature %4.4s",
+ (u32) table_header->length,
+ ACPI_CAST_PTR(char, &signature)));
ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header));
@@ -192,72 +186,119 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
/*******************************************************************************
*
- * FUNCTION: acpi_tb_verify_table_checksum
+ * FUNCTION: acpi_tb_sum_table
*
- * PARAMETERS: *table_header - ACPI table to verify
+ * PARAMETERS: Buffer - Buffer to sum
+ * Length - Size of the buffer
*
- * RETURN: 8 bit checksum of table
+ * RETURN: 8 bit sum of buffer
*
- * DESCRIPTION: Does an 8 bit checksum of table and returns status. A correct
- * table should have a checksum of 0.
+ * DESCRIPTION: Computes an 8 bit sum of the buffer (Length bytes) and returns it.
*
******************************************************************************/
-acpi_status
-acpi_tb_verify_table_checksum(struct acpi_table_header * table_header)
+u8 acpi_tb_sum_table(void *buffer, u32 length)
+{
+ acpi_native_uint i;
+ u8 sum = 0;
+
+ if (!buffer || !length) {
+ return (0);
+ }
+
+ for (i = 0; i < length; i++) {
+ sum = (u8) (sum + ((u8 *) buffer)[i]);
+ }
+ return (sum);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_generate_checksum
+ *
+ * PARAMETERS: Table - Pointer to a valid ACPI table (with a
+ * standard ACPI header)
+ *
+ * RETURN: 8 bit checksum of buffer
+ *
+ * DESCRIPTION: Computes an 8 bit checksum of the table.
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_generate_checksum(struct acpi_table_header * table)
{
u8 checksum;
- acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("tb_verify_table_checksum");
+ /* Sum the entire table as-is */
- /* Compute the checksum on the table */
+ checksum = acpi_tb_sum_table(table, table->length);
- checksum =
- acpi_tb_generate_checksum(table_header, table_header->length);
+ /* Subtract off the existing checksum value in the table */
- /* Return the appropriate exception */
+ checksum = (u8) (checksum - table->checksum);
- if (checksum) {
- ACPI_WARNING((AE_INFO,
- "Invalid checksum in table [%4.4s] (%02X, sum %02X is not zero)",
- table_header->signature,
- (u32) table_header->checksum, (u32) checksum));
+ /* Compute the final checksum */
- status = AE_BAD_CHECKSUM;
- }
- return_ACPI_STATUS(status);
+ checksum = (u8) (0 - checksum);
+ return (checksum);
}
/*******************************************************************************
*
- * FUNCTION: acpi_tb_generate_checksum
+ * FUNCTION: acpi_tb_set_checksum
*
- * PARAMETERS: Buffer - Buffer to checksum
- * Length - Size of the buffer
+ * PARAMETERS: Table - Pointer to a valid ACPI table (with a
+ * standard ACPI header)
*
- * RETURN: 8 bit checksum of buffer
+ * RETURN: None. Sets the table checksum field
*
- * DESCRIPTION: Computes an 8 bit checksum of the buffer(length) and returns it.
+ * DESCRIPTION: Computes an 8 bit checksum of the table and inserts the
+ * checksum into the table header.
*
******************************************************************************/
-u8 acpi_tb_generate_checksum(void *buffer, u32 length)
+void acpi_tb_set_checksum(struct acpi_table_header *table)
{
- u8 *end_buffer;
- u8 *rover;
- u8 sum = 0;
- if (buffer && length) {
- /* Buffer and Length are valid */
+ table->checksum = acpi_tb_generate_checksum(table);
+}
- end_buffer = ACPI_ADD_PTR(u8, buffer, length);
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_verify_table_checksum
+ *
+ * PARAMETERS: *table_header - ACPI table to verify
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Generates an 8 bit checksum of the table and compares it
+ * to the checksum value stored in the table header.
+ *
+ ******************************************************************************/
- for (rover = buffer; rover < end_buffer; rover++) {
- sum = (u8) (sum + *rover);
- }
+acpi_status
+acpi_tb_verify_table_checksum(struct acpi_table_header *table_header)
+{
+ u8 checksum;
+
+ ACPI_FUNCTION_TRACE(tb_verify_table_checksum);
+
+ /* Compute the checksum on the table */
+
+ checksum = acpi_tb_generate_checksum(table_header);
+
+ /* Checksum ok? */
+
+ if (checksum == table_header->checksum) {
+ return_ACPI_STATUS(AE_OK);
}
- return (sum);
+
+ ACPI_WARNING((AE_INFO,
+ "Incorrect checksum in table [%4.4s] - is %2.2X, should be %2.2X",
+ table_header->signature, table_header->checksum,
+ checksum));
+
+ return_ACPI_STATUS(AE_BAD_CHECKSUM);
}
#ifdef ACPI_OBSOLETE_FUNCTIONS
@@ -276,12 +317,12 @@ u8 acpi_tb_generate_checksum(void *buffer, u32 length)
acpi_status
acpi_tb_handle_to_object(u16 table_id,
- struct acpi_table_desc ** return_table_desc)
+ struct acpi_table_desc **return_table_desc)
{
u32 i;
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_NAME("tb_handle_to_object");
+ ACPI_FUNCTION_NAME(tb_handle_to_object);
for (i = 0; i < ACPI_TABLE_MAX; i++) {
table_desc = acpi_gbl_table_lists[i].next;
@@ -295,7 +336,7 @@ acpi_tb_handle_to_object(u16 table_id,
}
}
- ACPI_ERROR((AE_INFO, "table_id=%X does not exist", table_id));
+ ACPI_ERROR((AE_INFO, "TableId=%X does not exist", table_id));
return (AE_BAD_PARAMETER);
}
#endif
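The checksum helpers added to tbutils.c above encode the usual ACPI rule that a valid table sums to zero modulo 256: acpi_tb_sum_table adds every byte, acpi_tb_generate_checksum subtracts the stored checksum byte and negates the remainder, and writing that value back makes the whole table sum to zero. A standalone sketch of the same arithmetic (plain C, not ACPICA code):

    #include <stdint.h>
    #include <stddef.h>

    /* 8-bit sum of an arbitrary buffer */
    static uint8_t sum_bytes(const uint8_t *buf, size_t len)
    {
            uint8_t sum = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    sum = (uint8_t)(sum + buf[i]);
            return sum;
    }

    /* Value that, once stored in the checksum field, makes the table sum to 0 */
    static uint8_t generate_checksum(const uint8_t *table, size_t len,
                                     uint8_t stored_checksum)
    {
            uint8_t sum = sum_bytes(table, len);

            sum = (uint8_t)(sum - stored_checksum); /* drop the old checksum byte */
            return (uint8_t)(0 - sum);              /* negate so the total is zero */
    }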
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 9fe53c9d5b9..4e91f298481 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -42,8 +42,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
#include <acpi/actables.h>
@@ -68,7 +66,7 @@ acpi_status acpi_load_tables(void)
struct acpi_pointer rsdp_address;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_load_tables");
+ ACPI_FUNCTION_TRACE(acpi_load_tables);
/* Get the RSDP */
@@ -123,6 +121,8 @@ acpi_status acpi_load_tables(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_load_tables)
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -139,14 +139,13 @@ acpi_status acpi_load_tables(void)
* is determined that the table is invalid, the call will fail.
*
******************************************************************************/
-
acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
{
acpi_status status;
struct acpi_table_desc table_info;
struct acpi_pointer address;
- ACPI_FUNCTION_TRACE("acpi_load_table");
+ ACPI_FUNCTION_TRACE(acpi_load_table);
if (!table_ptr) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -174,6 +173,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
status = acpi_tb_install_table(&table_info);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
+
/* Table already exists, no error */
status = AE_OK;
@@ -188,12 +188,12 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
/* Convert the table to common format if necessary */
switch (table_info.type) {
- case ACPI_TABLE_FADT:
+ case ACPI_TABLE_ID_FADT:
status = acpi_tb_convert_table_fadt();
break;
- case ACPI_TABLE_FACS:
+ case ACPI_TABLE_ID_FACS:
status = acpi_tb_build_common_facs(&table_info);
break;
@@ -208,6 +208,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
}
if (ACPI_FAILURE(status)) {
+
/* Uninstall table and free the buffer */
(void)acpi_tb_uninstall_table(table_info.installed_desc);
@@ -216,6 +217,8 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_load_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_unload_table
@@ -227,16 +230,15 @@ acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
* DESCRIPTION: This routine is used to force the unload of a table
*
******************************************************************************/
-
acpi_status acpi_unload_table(acpi_table_type table_type)
{
struct acpi_table_desc *table_desc;
- ACPI_FUNCTION_TRACE("acpi_unload_table");
+ ACPI_FUNCTION_TRACE(acpi_unload_table);
/* Parameter validation */
- if (table_type > ACPI_TABLE_MAX) {
+ if (table_type > ACPI_TABLE_ID_MAX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -261,6 +263,8 @@ acpi_status acpi_unload_table(acpi_table_type table_type)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_unload_table)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_table_header
@@ -281,7 +285,6 @@ acpi_status acpi_unload_table(acpi_table_type table_type)
* have a standard header and is fixed length.
*
******************************************************************************/
-
acpi_status
acpi_get_table_header(acpi_table_type table_type,
u32 instance, struct acpi_table_header *out_table_header)
@@ -289,16 +292,16 @@ acpi_get_table_header(acpi_table_type table_type,
struct acpi_table_header *tbl_ptr;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_get_table_header");
+ ACPI_FUNCTION_TRACE(acpi_get_table_header);
if ((instance == 0) ||
- (table_type == ACPI_TABLE_RSDP) || (!out_table_header)) {
+ (table_type == ACPI_TABLE_ID_RSDP) || (!out_table_header)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Check the table type and instance */
- if ((table_type > ACPI_TABLE_MAX) ||
+ if ((table_type > ACPI_TABLE_ID_MAX) ||
(ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) &&
instance > 1)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -325,6 +328,7 @@ acpi_get_table_header(acpi_table_type table_type,
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_get_table_header)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
@@ -349,7 +353,6 @@ acpi_get_table_header(acpi_table_type table_type,
* a complete table including the header.
*
******************************************************************************/
-
acpi_status
acpi_get_table(acpi_table_type table_type,
u32 instance, struct acpi_buffer *ret_buffer)
@@ -358,7 +361,7 @@ acpi_get_table(acpi_table_type table_type,
acpi_status status;
acpi_size table_length;
- ACPI_FUNCTION_TRACE("acpi_get_table");
+ ACPI_FUNCTION_TRACE(acpi_get_table);
/* Parameter validation */
@@ -373,7 +376,7 @@ acpi_get_table(acpi_table_type table_type,
/* Check the table type and instance */
- if ((table_type > ACPI_TABLE_MAX) ||
+ if ((table_type > ACPI_TABLE_ID_MAX) ||
(ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) &&
instance > 1)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -396,7 +399,8 @@ acpi_get_table(acpi_table_type table_type,
/* Get the table length */
- if (table_type == ACPI_TABLE_RSDP) {
+ if (table_type == ACPI_TABLE_ID_RSDP) {
+
/* RSD PTR is the only "table" without a header */
table_length = sizeof(struct rsdp_descriptor);
@@ -417,4 +421,4 @@ acpi_get_table(acpi_table_type table_type,
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_get_table);
+ACPI_EXPORT_SYMBOL(acpi_get_table)
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index a62db6af83c..da2648bbdbc 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/actables.h>
@@ -75,6 +73,7 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
* The signature and checksum must both be correct
*/
if (ACPI_STRNCMP((char *)rsdp, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0) {
+
/* Nope, BAD Signature */
return (AE_BAD_SIGNATURE);
@@ -82,15 +81,14 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
/* Check the standard checksum */
- if (acpi_tb_generate_checksum(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
+ if (acpi_tb_sum_table(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
return (AE_BAD_CHECKSUM);
}
/* Check extended checksum if table version >= 2 */
if ((rsdp->revision >= 2) &&
- (acpi_tb_generate_checksum(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) !=
- 0)) {
+ (acpi_tb_sum_table(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
return (AE_BAD_CHECKSUM);
}
@@ -121,7 +119,7 @@ acpi_tb_find_table(char *signature,
acpi_status status;
struct acpi_table_header *table;
- ACPI_FUNCTION_TRACE("tb_find_table");
+ ACPI_FUNCTION_TRACE(tb_find_table);
/* Validate string lengths */
@@ -131,7 +129,7 @@ acpi_tb_find_table(char *signature,
return_ACPI_STATUS(AE_AML_STRING_LIMIT);
}
- if (!ACPI_STRNCMP(signature, DSDT_SIG, ACPI_NAME_SIZE)) {
+ if (ACPI_COMPARE_NAME(signature, DSDT_SIG)) {
/*
* The DSDT pointer is contained in the FADT, not the RSDT.
* This code should suffice, because the only code that would perform
@@ -156,10 +154,12 @@ acpi_tb_find_table(char *signature,
/* Check oem_id and oem_table_id */
- if ((oem_id[0] && ACPI_STRNCMP(oem_id, table->oem_id,
- sizeof(table->oem_id))) ||
- (oem_table_id[0] && ACPI_STRNCMP(oem_table_id, table->oem_table_id,
- sizeof(table->oem_table_id)))) {
+ if ((oem_id[0] &&
+ ACPI_STRNCMP(oem_id, table->oem_id,
+ sizeof(table->oem_id))) ||
+ (oem_table_id[0] &&
+ ACPI_STRNCMP(oem_table_id, table->oem_table_id,
+ sizeof(table->oem_table_id)))) {
return_ACPI_STATUS(AE_AML_NAME_NOT_FOUND);
}
@@ -203,7 +203,7 @@ acpi_get_firmware_table(acpi_string signature,
u32 i;
u32 j;
- ACPI_FUNCTION_TRACE("acpi_get_firmware_table");
+ ACPI_FUNCTION_TRACE(acpi_get_firmware_table);
/*
* Ensure that at least the table manager is initialized. We don't
@@ -217,6 +217,7 @@ acpi_get_firmware_table(acpi_string signature,
/* Ensure that we have a RSDP */
if (!acpi_gbl_RSDP) {
+
/* Get the RSDP */
status = acpi_os_get_root_pointer(flags, &address);
@@ -261,7 +262,7 @@ acpi_get_firmware_table(acpi_string signature,
/* Get and validate the RSDT */
- rsdt_info = ACPI_MEM_CALLOCATE(sizeof(struct acpi_table_desc));
+ rsdt_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
if (!rsdt_info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -278,13 +279,13 @@ acpi_get_firmware_table(acpi_string signature,
/* Allocate a scratch table header and table descriptor */
- header = ACPI_MEM_ALLOCATE(sizeof(struct acpi_table_header));
+ header = ACPI_ALLOCATE(sizeof(struct acpi_table_header));
if (!header) {
status = AE_NO_MEMORY;
goto cleanup;
}
- table_info = ACPI_MEM_ALLOCATE(sizeof(struct acpi_table_desc));
+ table_info = ACPI_ALLOCATE(sizeof(struct acpi_table_desc));
if (!table_info) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -308,12 +309,12 @@ acpi_get_firmware_table(acpi_string signature,
if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
address.pointer.value =
(ACPI_CAST_PTR
- (RSDT_DESCRIPTOR,
+ (struct rsdt_descriptor,
rsdt_info->pointer))->table_offset_entry[i];
} else {
address.pointer.value =
(ACPI_CAST_PTR
- (XSDT_DESCRIPTOR,
+ (struct xsdt_descriptor,
rsdt_info->pointer))->table_offset_entry[i];
}
@@ -326,11 +327,13 @@ acpi_get_firmware_table(acpi_string signature,
/* Compare table signatures and table instance */
- if (!ACPI_STRNCMP(header->signature, signature, ACPI_NAME_SIZE)) {
+ if (ACPI_COMPARE_NAME(header->signature, signature)) {
+
/* An instance of the table was found */
j++;
if (j >= instance) {
+
/* Found the correct instance, get the entire table */
status =
@@ -355,23 +358,21 @@ acpi_get_firmware_table(acpi_string signature,
acpi_os_unmap_memory(rsdt_info->pointer,
(acpi_size) rsdt_info->pointer->length);
}
- ACPI_MEM_FREE(rsdt_info);
+ ACPI_FREE(rsdt_info);
if (header) {
- ACPI_MEM_FREE(header);
+ ACPI_FREE(header);
}
if (table_info) {
- ACPI_MEM_FREE(table_info);
+ ACPI_FREE(table_info);
}
return_ACPI_STATUS(status);
}
-EXPORT_SYMBOL(acpi_get_firmware_table);
+ACPI_EXPORT_SYMBOL(acpi_get_firmware_table)
/* TBD: Move to a new file */
-
#if ACPI_MACHINE_WIDTH != 16
-
/*******************************************************************************
*
* FUNCTION: acpi_find_root_pointer
@@ -384,13 +385,12 @@ EXPORT_SYMBOL(acpi_get_firmware_table);
* DESCRIPTION: Find the RSDP
*
******************************************************************************/
-
acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
{
struct acpi_table_desc table_info;
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_find_root_pointer");
+ ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
/* Get the RSDP */
@@ -407,6 +407,8 @@ acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
return_ACPI_STATUS(AE_OK);
}
+ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
+
/*******************************************************************************
*
* FUNCTION: acpi_tb_scan_memory_for_rsdp
@@ -419,14 +421,13 @@ acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
* DESCRIPTION: Search a block of memory for the RSDP signature
*
******************************************************************************/
-
static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
{
acpi_status status;
u8 *mem_rover;
u8 *end_address;
- ACPI_FUNCTION_TRACE("tb_scan_memory_for_rsdp");
+ ACPI_FUNCTION_TRACE(tb_scan_memory_for_rsdp);
end_address = start_address + length;
@@ -434,12 +435,14 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
for (mem_rover = start_address; mem_rover < end_address;
mem_rover += ACPI_RSDP_SCAN_STEP) {
+
/* The RSDP signature and checksum must both be correct */
status =
acpi_tb_validate_rsdp(ACPI_CAST_PTR
(struct rsdp_descriptor, mem_rover));
if (ACPI_SUCCESS(status)) {
+
/* Sig and checksum valid, we have found a real RSDP */
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -469,10 +472,10 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
*
* RETURN: Status, RSDP physical address
*
- * DESCRIPTION: search lower 1_mbyte of memory for the root system descriptor
+ * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
* pointer structure. If it is found, set *RSDP to point to it.
*
- * NOTE1: The RSDp must be either in the first 1_k of the Extended
+ * NOTE1: The RSDP must be either in the first 1_k of the Extended
* BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
* Only a 32-bit physical address is necessary.
*
@@ -489,12 +492,13 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
u32 physical_address;
acpi_status status;
- ACPI_FUNCTION_TRACE("tb_find_rsdp");
+ ACPI_FUNCTION_TRACE(tb_find_rsdp);
/*
* Scan supports either logical addressing or physical addressing
*/
if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
+
/* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
status = acpi_os_map_memory((acpi_physical_address)
@@ -521,7 +525,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
if (physical_address > 0x400) {
/*
- * 1b) Search EBDA paragraphs (EBDa is required to be a
+ * 1b) Search EBDA paragraphs (EBDA is required to be a
* minimum of 1_k length)
*/
status = acpi_os_map_memory((acpi_physical_address)
@@ -542,10 +546,11 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
if (mem_rover) {
+
/* Return the physical address */
physical_address +=
- ACPI_PTR_DIFF(mem_rover, table_ptr);
+ (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
table_info->physical_address =
(acpi_physical_address) physical_address;
@@ -576,11 +581,12 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
if (mem_rover) {
+
/* Return the physical address */
- physical_address =
- ACPI_HI_RSDP_WINDOW_BASE + ACPI_PTR_DIFF(mem_rover,
- table_ptr);
+ physical_address = (u32)
+ (ACPI_HI_RSDP_WINDOW_BASE +
+ ACPI_PTR_DIFF(mem_rover, table_ptr));
table_info->physical_address =
(acpi_physical_address) physical_address;
@@ -601,7 +607,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
if (physical_address > 0x400) {
/*
- * 1b) Search EBDA paragraphs (EBDa is required to be a minimum of
+ * 1b) Search EBDA paragraphs (EBDA is required to be a minimum of
* 1_k length)
*/
mem_rover =
@@ -609,6 +615,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
(physical_address),
ACPI_EBDA_WINDOW_SIZE);
if (mem_rover) {
+
/* Return the physical address */
table_info->physical_address =
@@ -624,6 +631,7 @@ acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
(ACPI_HI_RSDP_WINDOW_BASE),
ACPI_HI_RSDP_WINDOW_SIZE);
if (mem_rover) {
+
/* Found it, return the physical address */
table_info->physical_address =
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 19f3ea48475..e7fe3a14fda 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -82,6 +82,7 @@ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.\n");
static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device, int type);
+static int acpi_thermal_resume(struct acpi_device *device, int state);
static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -103,6 +104,7 @@ static struct acpi_driver acpi_thermal_driver = {
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
+ .resume = acpi_thermal_resume,
},
};
@@ -684,8 +686,7 @@ static void acpi_thermal_run(unsigned long data)
{
struct acpi_thermal *tz = (struct acpi_thermal *)data;
if (!tz->zombie)
- acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
- acpi_thermal_check, (void *)data);
+ acpi_os_execute(OSL_GPE_HANDLER, acpi_thermal_check, (void *)data);
}
static void acpi_thermal_check(void *data)
@@ -942,8 +943,10 @@ acpi_thermal_write_trip_points(struct file *file,
memset(limit_string, 0, ACPI_THERMAL_MAX_LIMIT_STR_LEN);
active = kmalloc(ACPI_THERMAL_MAX_ACTIVE * sizeof(int), GFP_KERNEL);
- if (!active)
+ if (!active) {
+ kfree(limit_string);
return_VALUE(-ENOMEM);
+ }
if (!tz || (count > ACPI_THERMAL_MAX_LIMIT_STR_LEN - 1)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid argument\n"));
@@ -1342,7 +1345,7 @@ static int acpi_thermal_add(struct acpi_device *device)
result = acpi_thermal_add_fs(device);
if (result)
- return_VALUE(result);
+ goto end;
init_timer(&tz->timer);
@@ -1416,6 +1419,20 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
+static int acpi_thermal_resume(struct acpi_device *device, int state)
+{
+ struct acpi_thermal *tz = NULL;
+
+ if (!device || !acpi_driver_data(device))
+ return_VALUE(-EINVAL);
+
+ tz = (struct acpi_thermal *)acpi_driver_data(device);
+
+ acpi_thermal_check(tz);
+
+ return AE_OK;
+}
+
static int __init acpi_thermal_init(void)
{
int result = 0;
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 03b0044974c..7940fc1bd69 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -46,24 +46,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utalloc")
-/* Local prototypes */
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-static struct acpi_debug_mem_block *acpi_ut_find_allocation(void *allocation);
-
-static acpi_status
-acpi_ut_track_allocation(struct acpi_debug_mem_block *address,
- acpi_size size,
- u8 alloc_type, u32 component, char *module, u32 line);
-
-static acpi_status
-acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
- u32 component, char *module, u32 line);
-
-static acpi_status
-acpi_ut_create_list(char *list_name,
- u16 object_size, struct acpi_memory_list **return_cache);
-#endif
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_create_caches
@@ -75,33 +57,23 @@ acpi_ut_create_list(char *list_name,
* DESCRIPTION: Create all local caches
*
******************************************************************************/
-
acpi_status acpi_ut_create_caches(void)
{
acpi_status status;
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
- /* Memory allocation lists */
-
- status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
+ /* Object Caches, for frequently used objects */
status =
- acpi_ut_create_list("Acpi-Namespace",
- sizeof(struct acpi_namespace_node),
- &acpi_gbl_ns_node_list);
+ acpi_os_create_cache("Acpi-Namespace",
+ sizeof(struct acpi_namespace_node),
+ ACPI_MAX_NAMESPACE_CACHE_DEPTH,
+ &acpi_gbl_namespace_cache);
if (ACPI_FAILURE(status)) {
return (status);
}
-#endif
-
- /* Object Caches, for frequently used objects */
status =
- acpi_os_create_cache("acpi_state", sizeof(union acpi_generic_state),
+ acpi_os_create_cache("Acpi-State", sizeof(union acpi_generic_state),
ACPI_MAX_STATE_CACHE_DEPTH,
&acpi_gbl_state_cache);
if (ACPI_FAILURE(status)) {
@@ -109,7 +81,7 @@ acpi_status acpi_ut_create_caches(void)
}
status =
- acpi_os_create_cache("acpi_parse",
+ acpi_os_create_cache("Acpi-Parse",
sizeof(struct acpi_parse_obj_common),
ACPI_MAX_PARSE_CACHE_DEPTH,
&acpi_gbl_ps_node_cache);
@@ -118,7 +90,7 @@ acpi_status acpi_ut_create_caches(void)
}
status =
- acpi_os_create_cache("acpi_parse_ext",
+ acpi_os_create_cache("Acpi-ParseExt",
sizeof(struct acpi_parse_obj_named),
ACPI_MAX_EXTPARSE_CACHE_DEPTH,
&acpi_gbl_ps_node_ext_cache);
@@ -127,7 +99,7 @@ acpi_status acpi_ut_create_caches(void)
}
status =
- acpi_os_create_cache("acpi_operand",
+ acpi_os_create_cache("Acpi-Operand",
sizeof(union acpi_operand_object),
ACPI_MAX_OBJECT_CACHE_DEPTH,
&acpi_gbl_operand_cache);
@@ -135,6 +107,24 @@ acpi_status acpi_ut_create_caches(void)
return (status);
}
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Memory allocation lists */
+
+ status = acpi_ut_create_list("Acpi-Global", 0, &acpi_gbl_global_list);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status =
+ acpi_ut_create_list("Acpi-Namespace",
+ sizeof(struct acpi_namespace_node),
+ &acpi_gbl_ns_node_list);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+#endif
+
return (AE_OK);
}
@@ -153,6 +143,9 @@ acpi_status acpi_ut_create_caches(void)
acpi_status acpi_ut_delete_caches(void)
{
+ (void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
+ acpi_gbl_namespace_cache = NULL;
+
(void)acpi_os_delete_cache(acpi_gbl_state_cache);
acpi_gbl_state_cache = NULL;
@@ -165,6 +158,21 @@ acpi_status acpi_ut_delete_caches(void)
(void)acpi_os_delete_cache(acpi_gbl_ps_node_ext_cache);
acpi_gbl_ps_node_ext_cache = NULL;
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+
+ /* Debug only - display leftover memory allocation, if any */
+
+ acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
+
+ /* Free memory lists */
+
+ acpi_os_free(acpi_gbl_global_list);
+ acpi_gbl_global_list = NULL;
+
+ acpi_os_free(acpi_gbl_ns_node_list);
+ acpi_gbl_ns_node_list = NULL;
+#endif
+
return (AE_OK);
}
@@ -252,7 +260,7 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
/* Allocate a new buffer with local interface to allow tracking */
- buffer->pointer = ACPI_MEM_CALLOCATE(required_length);
+ buffer->pointer = ACPI_ALLOCATE_ZEROED(required_length);
if (!buffer->pointer) {
return (AE_NO_MEMORY);
}
@@ -288,7 +296,7 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
- * DESCRIPTION: The subsystem's equivalent of malloc.
+ * DESCRIPTION: Subsystem equivalent of malloc.
*
******************************************************************************/
@@ -296,23 +304,23 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
{
void *allocation;
- ACPI_FUNCTION_TRACE_U32("ut_allocate", size);
+ ACPI_FUNCTION_TRACE_U32(ut_allocate, size);
/* Check for an inadvertent size of zero bytes */
if (!size) {
- ACPI_ERROR((module, line,
- "ut_allocate: Attempt to allocate zero bytes, allocating 1 byte"));
+ ACPI_WARNING((module, line,
+ "Attempt to allocate zero bytes, allocating 1 byte"));
size = 1;
}
allocation = acpi_os_allocate(size);
if (!allocation) {
+
/* Report allocation error */
- ACPI_ERROR((module, line,
- "ut_allocate: Could not allocate size %X",
- (u32) size));
+ ACPI_WARNING((module, line,
+ "Could not allocate size %X", (u32) size));
return_PTR(NULL);
}
@@ -322,7 +330,7 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_callocate
+ * FUNCTION: acpi_ut_allocate_zeroed
*
* PARAMETERS: Size - Size of the allocation
* Component - Component type of caller
@@ -331,542 +339,24 @@ void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line)
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
- * DESCRIPTION: Subsystem equivalent of calloc.
+ * DESCRIPTION: Subsystem equivalent of calloc. Allocate and zero memory.
*
******************************************************************************/
-void *acpi_ut_callocate(acpi_size size, u32 component, char *module, u32 line)
+void *acpi_ut_allocate_zeroed(acpi_size size,
+ u32 component, char *module, u32 line)
{
void *allocation;
- ACPI_FUNCTION_TRACE_U32("ut_callocate", size);
-
- /* Check for an inadvertent size of zero bytes */
-
- if (!size) {
- ACPI_ERROR((module, line,
- "Attempt to allocate zero bytes, allocating 1 byte"));
- size = 1;
- }
-
- allocation = acpi_os_allocate(size);
- if (!allocation) {
- /* Report allocation error */
-
- ACPI_ERROR((module, line,
- "Could not allocate size %X", (u32) size));
- return_PTR(NULL);
- }
-
- /* Clear the memory block */
-
- ACPI_MEMSET(allocation, 0, size);
- return_PTR(allocation);
-}
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-/*
- * These procedures are used for tracking memory leaks in the subsystem, and
- * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set.
- *
- * Each memory allocation is tracked via a doubly linked list. Each
- * element contains the caller's component, module name, function name, and
- * line number. acpi_ut_allocate and acpi_ut_callocate call
- * acpi_ut_track_allocation to add an element to the list; deletion
- * occurs in the body of acpi_ut_free.
- */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_create_list
- *
- * PARAMETERS: cache_name - Ascii name for the cache
- * object_size - Size of each cached object
- * return_cache - Where the new cache object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Create a local memory list for tracking purposed
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_create_list(char *list_name,
- u16 object_size, struct acpi_memory_list **return_cache)
-{
- struct acpi_memory_list *cache;
-
- cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
- if (!cache) {
- return (AE_NO_MEMORY);
- }
-
- ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
-
- cache->list_name = list_name;
- cache->object_size = object_size;
-
- *return_cache = cache;
- return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_allocate_and_track
- *
- * PARAMETERS: Size - Size of the allocation
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: The subsystem's equivalent of malloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_allocate_and_track(acpi_size size,
- u32 component, char *module, u32 line)
-{
- struct acpi_debug_mem_block *allocation;
- acpi_status status;
-
- allocation =
- acpi_ut_allocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
- if (!allocation) {
- return (NULL);
- }
-
- status = acpi_ut_track_allocation(allocation, size,
- ACPI_MEM_MALLOC, component, module,
- line);
- if (ACPI_FAILURE(status)) {
- acpi_os_free(allocation);
- return (NULL);
- }
-
- acpi_gbl_global_list->total_allocated++;
- acpi_gbl_global_list->current_total_size += (u32) size;
-
- return ((void *)&allocation->user_space);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_callocate_and_track
- *
- * PARAMETERS: Size - Size of the allocation
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: Address of the allocated memory on success, NULL on failure.
- *
- * DESCRIPTION: Subsystem equivalent of calloc.
- *
- ******************************************************************************/
-
-void *acpi_ut_callocate_and_track(acpi_size size,
- u32 component, char *module, u32 line)
-{
- struct acpi_debug_mem_block *allocation;
- acpi_status status;
-
- allocation =
- acpi_ut_callocate(size + sizeof(struct acpi_debug_mem_header),
- component, module, line);
- if (!allocation) {
- /* Report allocation error */
-
- ACPI_ERROR((module, line,
- "Could not allocate size %X", (u32) size));
- return (NULL);
- }
-
- status = acpi_ut_track_allocation(allocation, size,
- ACPI_MEM_CALLOC, component, module,
- line);
- if (ACPI_FAILURE(status)) {
- acpi_os_free(allocation);
- return (NULL);
- }
-
- acpi_gbl_global_list->total_allocated++;
- acpi_gbl_global_list->current_total_size += (u32) size;
-
- return ((void *)&allocation->user_space);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_free_and_track
- *
- * PARAMETERS: Allocation - Address of the memory to deallocate
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: None
- *
- * DESCRIPTION: Frees the memory at Allocation
- *
- ******************************************************************************/
-
-void
-acpi_ut_free_and_track(void *allocation, u32 component, char *module, u32 line)
-{
- struct acpi_debug_mem_block *debug_block;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE_PTR("ut_free", allocation);
-
- if (NULL == allocation) {
- ACPI_ERROR((module, line, "Attempt to delete a NULL address"));
-
- return_VOID;
- }
-
- debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block,
- (((char *)allocation) -
- sizeof(struct acpi_debug_mem_header)));
-
- acpi_gbl_global_list->total_freed++;
- acpi_gbl_global_list->current_total_size -= debug_block->size;
-
- status = acpi_ut_remove_allocation(debug_block,
- component, module, line);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Could not free memory"));
- }
-
- acpi_os_free(debug_block);
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_find_allocation
- *
- * PARAMETERS: Allocation - Address of allocated memory
- *
- * RETURN: A list element if found; NULL otherwise.
- *
- * DESCRIPTION: Searches for an element in the global allocation tracking list.
- *
- ******************************************************************************/
-
-static struct acpi_debug_mem_block *acpi_ut_find_allocation(void *allocation)
-{
- struct acpi_debug_mem_block *element;
-
ACPI_FUNCTION_ENTRY();
- element = acpi_gbl_global_list->list_head;
-
- /* Search for the address. */
-
- while (element) {
- if (element == allocation) {
- return (element);
- }
-
- element = element->next;
- }
-
- return (NULL);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_track_allocation
- *
- * PARAMETERS: Allocation - Address of allocated memory
- * Size - Size of the allocation
- * alloc_type - MEM_MALLOC or MEM_CALLOC
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN: None.
- *
- * DESCRIPTION: Inserts an element into the global allocation tracking list.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
- acpi_size size,
- u8 alloc_type, u32 component, char *module, u32 line)
-{
- struct acpi_memory_list *mem_list;
- struct acpi_debug_mem_block *element;
- acpi_status status = AE_OK;
-
- ACPI_FUNCTION_TRACE_PTR("ut_track_allocation", allocation);
-
- mem_list = acpi_gbl_global_list;
- status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Search list for this address to make sure it is not already on the list.
- * This will catch several kinds of problems.
- */
- element = acpi_ut_find_allocation(allocation);
- if (element) {
- ACPI_ERROR((AE_INFO,
- "ut_track_allocation: Allocation already present in list! (%p)",
- allocation));
-
- ACPI_ERROR((AE_INFO, "Element %p Address %p",
- element, allocation));
-
- goto unlock_and_exit;
- }
-
- /* Fill in the instance data. */
-
- allocation->size = (u32) size;
- allocation->alloc_type = alloc_type;
- allocation->component = component;
- allocation->line = line;
-
- ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
-
- /* Insert at list head */
-
- if (mem_list->list_head) {
- ((struct acpi_debug_mem_block *)(mem_list->list_head))->
- previous = allocation;
- }
-
- allocation->next = mem_list->list_head;
- allocation->previous = NULL;
-
- mem_list->list_head = allocation;
-
- unlock_and_exit:
- status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_remove_allocation
- *
- * PARAMETERS: Allocation - Address of allocated memory
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
- *
- * RETURN:
- *
- * DESCRIPTION: Deletes an element from the global allocation tracking list.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation,
- u32 component, char *module, u32 line)
-{
- struct acpi_memory_list *mem_list;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE("ut_remove_allocation");
-
- mem_list = acpi_gbl_global_list;
- if (NULL == mem_list->list_head) {
- /* No allocations! */
-
- ACPI_ERROR((module, line,
- "Empty allocation list, nothing to free!"));
-
- return_ACPI_STATUS(AE_OK);
- }
-
- status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Unlink */
-
- if (allocation->previous) {
- (allocation->previous)->next = allocation->next;
- } else {
- mem_list->list_head = allocation->next;
- }
-
- if (allocation->next) {
- (allocation->next)->previous = allocation->previous;
- }
-
- /* Mark the segment as deleted */
-
- ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size);
-
- ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing size 0%X\n",
- allocation->size));
+ allocation = acpi_ut_allocate(size, component, module, line);
+ if (allocation) {
- status = acpi_ut_release_mutex(ACPI_MTX_MEMORY);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_dump_allocation_info
- *
- * PARAMETERS:
- *
- * RETURN: None
- *
- * DESCRIPTION: Print some info about the outstanding allocations.
- *
- ******************************************************************************/
+ /* Clear the memory block */
-#ifdef ACPI_FUTURE_USAGE
-void acpi_ut_dump_allocation_info(void)
-{
-/*
- struct acpi_memory_list *mem_list;
-*/
-
- ACPI_FUNCTION_TRACE("ut_dump_allocation_info");
-
-/*
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Current allocations",
- mem_list->current_count,
- ROUND_UP_TO_1K (mem_list->current_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations",
- mem_list->max_concurrent_count,
- ROUND_UP_TO_1K (mem_list->max_concurrent_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects",
- running_object_count,
- ROUND_UP_TO_1K (running_object_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Total (all) allocations",
- running_alloc_count,
- ROUND_UP_TO_1K (running_alloc_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Current Nodes",
- acpi_gbl_current_node_count,
- ROUND_UP_TO_1K (acpi_gbl_current_node_size)));
-
- ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES,
- ("%30s: %4d (%3d Kb)\n", "Max Nodes",
- acpi_gbl_max_concurrent_node_count,
- ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count *
- sizeof (struct acpi_namespace_node)))));
-*/
- return_VOID;
-}
-#endif /* ACPI_FUTURE_USAGE */
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_dump_allocations
- *
- * PARAMETERS: Component - Component(s) to dump info for.
- * Module - Module to dump info for. NULL means all.
- *
- * RETURN: None
- *
- * DESCRIPTION: Print a list of all outstanding allocations.
- *
- ******************************************************************************/
-
-void acpi_ut_dump_allocations(u32 component, char *module)
-{
- struct acpi_debug_mem_block *element;
- union acpi_descriptor *descriptor;
- u32 num_outstanding = 0;
-
- ACPI_FUNCTION_TRACE("ut_dump_allocations");
-
- /*
- * Walk the allocation list.
- */
- if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) {
- return;
+ ACPI_MEMSET(allocation, 0, size);
}
- element = acpi_gbl_global_list->list_head;
- while (element) {
- if ((element->component & component) &&
- ((module == NULL)
- || (0 == ACPI_STRCMP(module, element->module)))) {
- /* Ignore allocated objects that are in a cache */
-
- descriptor =
- ACPI_CAST_PTR(union acpi_descriptor,
- &element->user_space);
- if (descriptor->descriptor_id != ACPI_DESC_TYPE_CACHED) {
- acpi_os_printf("%p Len %04X %9.9s-%d [%s] ",
- descriptor, element->size,
- element->module, element->line,
- acpi_ut_get_descriptor_name
- (descriptor));
-
- /* Most of the elements will be Operand objects. */
-
- switch (ACPI_GET_DESCRIPTOR_TYPE(descriptor)) {
- case ACPI_DESC_TYPE_OPERAND:
- acpi_os_printf("%12.12s R%hd",
- acpi_ut_get_type_name
- (descriptor->object.
- common.type),
- descriptor->object.
- common.reference_count);
- break;
-
- case ACPI_DESC_TYPE_PARSER:
- acpi_os_printf("aml_opcode %04hX",
- descriptor->op.asl.
- aml_opcode);
- break;
-
- case ACPI_DESC_TYPE_NAMED:
- acpi_os_printf("%4.4s",
- acpi_ut_get_node_name
- (&descriptor->node));
- break;
-
- default:
- break;
- }
-
- acpi_os_printf("\n");
- num_outstanding++;
- }
- }
- element = element->next;
- }
-
- (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
-
- /* Print summary */
-
- if (!num_outstanding) {
- ACPI_INFO((AE_INFO, "No outstanding allocations"));
- } else {
- ACPI_ERROR((AE_INFO,
- "%d(%X) Outstanding allocations",
- num_outstanding, num_outstanding));
- }
-
- return_VOID;
+ return (allocation);
}
-
-#endif /* #ifdef ACPI_DBG_TRACK_ALLOCATIONS */
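
The hunks above fold acpi_ut_callocate into acpi_ut_allocate_zeroed, which now just calls the plain allocator and clears the block on success. A minimal standalone sketch of the same pattern in portable C — the names here are illustrative, not the ACPICA macros:

#include <stdlib.h>
#include <string.h>

/* Allocate and zero a block; a zero-byte request is bumped to one byte,
 * mirroring the guard in acpi_ut_allocate above. */
static void *ut_allocate_zeroed(size_t size)
{
	void *block;

	if (size == 0)
		size = 1;	/* avoid the malloc(0) ambiguity */

	block = malloc(size);
	if (block)
		memset(block, 0, size);

	return block;
}
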
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 2177cb1ef2c..56270a30718 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -118,13 +118,14 @@ acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
/* Walk the list of objects in this cache */
while (cache->list_head) {
+
/* Delete and unlink one cached state object */
next = *(ACPI_CAST_INDIRECT_PTR(char,
&(((char *)cache->
list_head)[cache->
link_offset])));
- ACPI_MEM_FREE(cache->list_head);
+ ACPI_FREE(cache->list_head);
cache->list_head = next;
cache->current_depth--;
@@ -193,7 +194,7 @@ acpi_os_release_object(struct acpi_memory_list * cache, void *object)
/* If cache is full, just free this object */
if (cache->current_depth >= cache->max_depth) {
- ACPI_MEM_FREE(object);
+ ACPI_FREE(object);
ACPI_MEM_TRACKING(cache->total_freed++);
}
@@ -243,7 +244,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
acpi_status status;
void *object;
- ACPI_FUNCTION_NAME("os_acquire_object");
+ ACPI_FUNCTION_NAME(os_acquire_object);
if (!cache) {
return (NULL);
@@ -259,6 +260,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
/* Check the cache first */
if (cache->list_head) {
+
/* There is an object available, use it */
object = cache->list_head;
@@ -270,9 +272,9 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
cache->current_depth--;
ACPI_MEM_TRACKING(cache->hits++);
- ACPI_MEM_TRACKING(ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "Object %p from %s cache\n",
- object, cache->list_name)));
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Object %p from %s cache\n", object,
+ cache->list_name));
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
@@ -287,14 +289,14 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
ACPI_MEM_TRACKING(cache->total_allocated++);
- /* Avoid deadlock with ACPI_MEM_CALLOCATE */
+ /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
if (ACPI_FAILURE(status)) {
return (NULL);
}
- object = ACPI_MEM_CALLOCATE(cache->object_size);
+ object = ACPI_ALLOCATE_ZEROED(cache->object_size);
if (!object) {
return (NULL);
}
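
The utcache.c hunks only rename macros (ACPI_MEM_FREE to ACPI_FREE, ACPI_MEM_CALLOCATE to ACPI_ALLOCATE_ZEROED), but the structure they sit in is a bounded free-list cache: released objects are pushed onto a singly linked list until max_depth is reached, and acquire pops from that list before falling back to the allocator. A rough standalone sketch of the idea, with illustrative types and no locking:

#include <stdlib.h>
#include <string.h>

struct cache_node {		/* link stored at the start of each object */
	struct cache_node *next;
};

struct object_cache {
	struct cache_node *list_head;
	size_t object_size;	/* must be >= sizeof(struct cache_node) */
	unsigned int current_depth;
	unsigned int max_depth;
};

/* Take an object from the cache, or allocate a fresh zeroed one */
static void *cache_acquire(struct object_cache *cache)
{
	struct cache_node *node = cache->list_head;

	if (node) {
		cache->list_head = node->next;
		cache->current_depth--;
		memset(node, 0, cache->object_size);	/* match the calloc path */
		return node;
	}

	return calloc(1, cache->object_size);
}

/* Return an object to the cache, or free it outright if the cache is full */
static void cache_release(struct object_cache *cache, void *object)
{
	struct cache_node *node = object;

	if (cache->current_depth >= cache->max_depth) {
		free(object);
		return;
	}

	node->next = cache->list_head;
	cache->list_head = node;
	cache->current_depth++;
}
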
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index df2d32096b7..5e1a80d1bc3 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -109,7 +109,7 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ut_copy_isimple_to_esimple");
+ ACPI_FUNCTION_TRACE(ut_copy_isimple_to_esimple);
*buffer_space_used = 0;
@@ -325,7 +325,7 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
acpi_status status;
struct acpi_pkg_info info;
- ACPI_FUNCTION_TRACE("ut_copy_ipackage_to_epackage");
+ ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_epackage);
/*
* First package at head of the buffer
@@ -383,7 +383,7 @@ acpi_ut_copy_iobject_to_eobject(union acpi_operand_object *internal_object,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_copy_iobject_to_eobject");
+ ACPI_FUNCTION_TRACE(ut_copy_iobject_to_eobject);
if (ACPI_GET_OBJECT_TYPE(internal_object) == ACPI_TYPE_PACKAGE) {
/*
@@ -442,7 +442,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
{
union acpi_operand_object *internal_object;
- ACPI_FUNCTION_TRACE("ut_copy_esimple_to_isimple");
+ ACPI_FUNCTION_TRACE(ut_copy_esimple_to_isimple);
/*
* Simple types supported are: String, Buffer, Integer
@@ -472,8 +472,8 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
case ACPI_TYPE_STRING:
internal_object->string.pointer =
- ACPI_MEM_CALLOCATE((acpi_size) external_object->string.
- length + 1);
+ ACPI_ALLOCATE_ZEROED((acpi_size) external_object->string.
+ length + 1);
if (!internal_object->string.pointer) {
goto error_exit;
}
@@ -488,7 +488,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
case ACPI_TYPE_BUFFER:
internal_object->buffer.pointer =
- ACPI_MEM_CALLOCATE(external_object->buffer.length);
+ ACPI_ALLOCATE_ZEROED(external_object->buffer.length);
if (!internal_object->buffer.pointer) {
goto error_exit;
}
@@ -552,7 +552,7 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_operand_object *internal_object,
union acpi_operand_object *this_internal_obj;
union acpi_object *this_external_obj;
- ACPI_FUNCTION_TRACE("ut_copy_epackage_to_ipackage");
+ ACPI_FUNCTION_TRACE(ut_copy_epackage_to_ipackage);
/*
* First package at head of the buffer
@@ -600,7 +600,7 @@ acpi_ut_copy_eobject_to_iobject(union acpi_object *external_object,
{
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_copy_eobject_to_iobject");
+ ACPI_FUNCTION_TRACE(ut_copy_eobject_to_iobject);
if (external_object->type == ACPI_TYPE_PACKAGE) {
/*
@@ -676,7 +676,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
if ((source_desc->buffer.pointer) &&
(source_desc->buffer.length)) {
dest_desc->buffer.pointer =
- ACPI_MEM_ALLOCATE(source_desc->buffer.length);
+ ACPI_ALLOCATE(source_desc->buffer.length);
if (!dest_desc->buffer.pointer) {
return (AE_NO_MEMORY);
}
@@ -697,8 +697,8 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
*/
if (source_desc->string.pointer) {
dest_desc->string.pointer =
- ACPI_MEM_ALLOCATE((acpi_size) source_desc->string.
- length + 1);
+ ACPI_ALLOCATE((acpi_size) source_desc->string.
+ length + 1);
if (!dest_desc->string.pointer) {
return (AE_NO_MEMORY);
}
@@ -805,9 +805,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
/*
* Create the object array
*/
- target_object->package.elements =
- ACPI_MEM_CALLOCATE(((acpi_size) source_object->package.
- count + 1) * sizeof(void *));
+ target_object->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.count + 1) * sizeof(void *));
if (!target_object->package.elements) {
status = AE_NO_MEMORY;
goto error_exit;
@@ -856,7 +854,7 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ut_copy_ipackage_to_ipackage");
+ ACPI_FUNCTION_TRACE(ut_copy_ipackage_to_ipackage);
dest_obj->common.type = ACPI_GET_OBJECT_TYPE(source_obj);
dest_obj->common.flags = source_obj->common.flags;
@@ -865,10 +863,10 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
/*
* Create the object array and walk the source package tree
*/
- dest_obj->package.elements = ACPI_MEM_CALLOCATE(((acpi_size)
- source_obj->package.
- count +
- 1) * sizeof(void *));
+ dest_obj->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ source_obj->package.
+ count +
+ 1) * sizeof(void *));
if (!dest_obj->package.elements) {
ACPI_ERROR((AE_INFO, "Package allocation failure"));
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -882,6 +880,7 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
acpi_ut_copy_ielement_to_ielement,
walk_state);
if (ACPI_FAILURE(status)) {
+
/* On failure, delete the destination package object */
acpi_ut_remove_reference(dest_obj);
@@ -911,7 +910,7 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("ut_copy_iobject_to_iobject");
+ ACPI_FUNCTION_TRACE(ut_copy_iobject_to_iobject);
/* Create the top level object */
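
Several utcopy.c hunks replace ACPI_MEM_CALLOCATE with ACPI_ALLOCATE_ZEROED when building the element array of a package object; the array is sized count + 1 so the extra zeroed slot acts as a NULL terminator for walkers. Sketched with libc calloc under hypothetical names:

#include <stdlib.h>

/* Allocate a NULL-terminated array of element pointers for a package
 * of 'count' elements; the extra zeroed slot terminates later walks. */
static void **alloc_package_elements(size_t count)
{
	return calloc(count + 1, sizeof(void *));
}

/* Walk until the terminating NULL entry */
static size_t count_package_elements(void *const *elements)
{
	size_t n = 0;

	while (elements[n])
		n++;
	return n;
}
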
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 35f3d581e03..5ec1cfcc611 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#define _COMPONENT ACPI_UTILITIES
@@ -123,12 +121,14 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
/* All Function names are longer than 4 chars, check is safe */
if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_MIXED) {
+
/* This is the case where the original source has not been modified */
return (function_name + 4);
}
if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_LOWER) {
+
/* This is the case where the source has been 'linuxized' */
return (function_name + 5);
@@ -162,7 +162,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
const char *function_name,
char *module_name, u32 component_id, char *format, ...)
{
- u32 thread_id;
+ acpi_thread_id thread_id;
va_list args;
/*
@@ -177,7 +177,6 @@ acpi_ut_debug_print(u32 requested_debug_level,
* Thread tracking and context switch notification
*/
thread_id = acpi_os_get_thread_id();
-
if (thread_id != acpi_gbl_prev_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
@@ -206,7 +205,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
acpi_os_vprintf(format, args);
}
-EXPORT_SYMBOL(acpi_ut_debug_print);
+ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
/*******************************************************************************
*
@@ -226,7 +225,6 @@ EXPORT_SYMBOL(acpi_ut_debug_print);
* debug_print so that the same macros can be used.
*
******************************************************************************/
-
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_debug_print_raw(u32 requested_debug_level,
u32 line_number,
@@ -244,7 +242,7 @@ acpi_ut_debug_print_raw(u32 requested_debug_level,
acpi_os_vprintf(format, args);
}
-EXPORT_SYMBOL(acpi_ut_debug_print_raw);
+ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
/*******************************************************************************
*
@@ -261,7 +259,6 @@ EXPORT_SYMBOL(acpi_ut_debug_print_raw);
* set in debug_level
*
******************************************************************************/
-
void
acpi_ut_trace(u32 line_number,
const char *function_name, char *module_name, u32 component_id)
@@ -275,7 +272,7 @@ acpi_ut_trace(u32 line_number,
component_id, "%s\n", acpi_gbl_fn_entry_str);
}
-EXPORT_SYMBOL(acpi_ut_trace);
+ACPI_EXPORT_SYMBOL(acpi_ut_trace)
/*******************************************************************************
*
@@ -293,7 +290,6 @@ EXPORT_SYMBOL(acpi_ut_trace);
* set in debug_level
*
******************************************************************************/
-
void
acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
@@ -400,7 +396,7 @@ acpi_ut_exit(u32 line_number,
acpi_gbl_nesting_level--;
}
-EXPORT_SYMBOL(acpi_ut_exit);
+ACPI_EXPORT_SYMBOL(acpi_ut_exit)
/*******************************************************************************
*
@@ -418,7 +414,6 @@ EXPORT_SYMBOL(acpi_ut_exit);
* set in debug_level. Prints exit status also.
*
******************************************************************************/
-
void
acpi_ut_status_exit(u32 line_number,
const char *function_name,
@@ -442,7 +437,7 @@ acpi_ut_status_exit(u32 line_number,
acpi_gbl_nesting_level--;
}
-EXPORT_SYMBOL(acpi_ut_status_exit);
+ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
/*******************************************************************************
*
@@ -460,7 +455,6 @@ EXPORT_SYMBOL(acpi_ut_status_exit);
* set in debug_level. Prints exit value also.
*
******************************************************************************/
-
void
acpi_ut_value_exit(u32 line_number,
const char *function_name,
@@ -475,7 +469,7 @@ acpi_ut_value_exit(u32 line_number,
acpi_gbl_nesting_level--;
}
-EXPORT_SYMBOL(acpi_ut_value_exit);
+ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
/*******************************************************************************
*
@@ -493,7 +487,6 @@ EXPORT_SYMBOL(acpi_ut_value_exit);
* set in debug_level. Prints exit value also.
*
******************************************************************************/
-
void
acpi_ut_ptr_exit(u32 line_number,
const char *function_name,
@@ -524,20 +517,13 @@ acpi_ut_ptr_exit(u32 line_number,
*
******************************************************************************/
-void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
{
acpi_native_uint i = 0;
acpi_native_uint j;
u32 temp32;
u8 buf_char;
- /* Only dump the buffer if tracing is enabled */
-
- if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
- (component_id & acpi_dbg_layer))) {
- return;
- }
-
if ((count < 4) || (count & 0x01)) {
display = DB_BYTE_DISPLAY;
}
@@ -545,6 +531,7 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
/* Nasty little dump buffer routine! */
while (i < count) {
+
/* Print current offset */
acpi_os_printf("%6.4X: ", (u32) i);
@@ -553,6 +540,7 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
for (j = 0; j < 16;) {
if (i + j >= count) {
+
/* Dump fill spaces */
acpi_os_printf("%*s", ((display * 2) + 1), " ");
@@ -561,6 +549,7 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
}
switch (display) {
+ case DB_BYTE_DISPLAY:
default: /* Default is BYTE display */
acpi_os_printf("%02X ", buffer[i + j]);
@@ -618,3 +607,31 @@ void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
return;
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_buffer
+ *
+ * PARAMETERS: Buffer - Buffer to dump
+ * Count - Amount to dump, in bytes
+ * Display - BYTE, WORD, DWORD, or QWORD display
+ * component_id - Caller's component ID
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ASCII.
+ *
+ ******************************************************************************/
+
+void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
+{
+
+ /* Only dump the buffer if tracing is enabled */
+
+ if (!((ACPI_LV_TABLES & acpi_dbg_level) &&
+ (component_id & acpi_dbg_layer))) {
+ return;
+ }
+
+ acpi_ut_dump_buffer2(buffer, count, display);
+}
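
acpi_ut_dump_buffer is split above into an unconditional worker (acpi_ut_dump_buffer2) and a thin wrapper that keeps the old debug-level filtering, so callers that have already decided to dump can bypass the filter. The worker itself is a conventional hex-plus-ASCII dump; a compact standalone version of that technique, byte display only, might look like this:

#include <stdio.h>
#include <ctype.h>

/* Dump 'count' bytes, 16 per line, as hex followed by printable ASCII */
static void dump_buffer(const unsigned char *buffer, size_t count)
{
	size_t i, j;

	for (i = 0; i < count; i += 16) {
		printf("%6.4zX: ", i);	/* current offset */

		for (j = 0; j < 16; j++) {
			if (i + j < count)
				printf("%02X ", buffer[i + j]);
			else
				printf("   ");	/* fill past end of buffer */
		}

		printf(" ");
		for (j = 0; j < 16 && i + j < count; j++) {
			unsigned char c = buffer[i + j];

			putchar(isprint(c) ? c : '.');
		}
		putchar('\n');
	}
}
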
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 1db9695b002..67b9f325c6f 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -76,7 +76,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
union acpi_operand_object *second_desc;
union acpi_operand_object *next_desc;
- ACPI_FUNCTION_TRACE_PTR("ut_delete_internal_obj", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
if (!object) {
return_VOID;
@@ -96,6 +96,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
/* Free the actual string buffer */
if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
/* But only if it is NOT a pointer into an ACPI table */
obj_pointer = object->string.pointer;
@@ -111,6 +112,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
/* Free the actual buffer */
if (!(object->common.flags & AOPOBJ_STATIC_POINTER)) {
+
/* But only if it is NOT a pointer into an ACPI table */
obj_pointer = object->buffer.pointer;
@@ -198,11 +200,22 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
*/
handler_desc = object->region.handler;
if (handler_desc) {
- if (handler_desc->address_space.
- hflags &
+ if (handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
- obj_pointer =
- second_desc->extra.region_context;
+
+ /* Deactivate region and free region context */
+
+ if (handler_desc->address_space.setup) {
+ (void)handler_desc->
+ address_space.setup(object,
+ ACPI_REGION_DEACTIVATE,
+ handler_desc->
+ address_space.
+ context,
+ &second_desc->
+ extra.
+ region_context);
+ }
}
acpi_ut_remove_reference(handler_desc);
@@ -234,7 +247,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
if (obj_pointer) {
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"Deleting Object Subptr %p\n", obj_pointer));
- ACPI_MEM_FREE(obj_pointer);
+ ACPI_FREE(obj_pointer);
}
/* Now the object can be safely deleted */
@@ -263,7 +276,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
{
union acpi_operand_object **internal_obj;
- ACPI_FUNCTION_TRACE("ut_delete_internal_object_list");
+ ACPI_FUNCTION_TRACE(ut_delete_internal_object_list);
/* Walk the null-terminated internal list */
@@ -273,7 +286,7 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
/* Free the combined parameter pointer list and object array */
- ACPI_MEM_FREE(obj_list);
+ ACPI_FREE(obj_list);
return_VOID;
}
@@ -296,7 +309,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
u16 count;
u16 new_count;
- ACPI_FUNCTION_NAME("ut_update_ref_count");
+ ACPI_FUNCTION_NAME(ut_update_ref_count);
if (!object) {
return;
@@ -306,11 +319,9 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
new_count = count;
/*
- * Perform the reference count action
- * (increment, decrement, or force delete)
+ * Perform the reference count action (increment, decrement, force delete)
*/
switch (action) {
-
case REF_INCREMENT:
new_count++;
@@ -347,7 +358,6 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
if (new_count == 0) {
acpi_ut_delete_internal_obj(object);
}
-
break;
case REF_FORCE_DELETE:
@@ -372,13 +382,10 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
* (A deleted object will have a huge reference count)
*/
if (count > ACPI_MAX_REFERENCE_COUNT) {
-
ACPI_WARNING((AE_INFO,
- "Large Reference Count (%X) in object %p",
- count, object));
+ "Large Reference Count (%X) in object %p", count,
+ object));
}
-
- return;
}
/*******************************************************************************
@@ -404,7 +411,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
******************************************************************************/
acpi_status
-acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
+acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
{
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
@@ -412,9 +419,10 @@ acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
union acpi_generic_state *state;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE_PTR("ut_update_object_reference", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_update_object_reference, object);
while (object) {
+
/* Make sure that this isn't a namespace handle */
if (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED) {
@@ -507,11 +515,11 @@ acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
case ACPI_TYPE_REGION:
default:
- break; /* No subobjects */
+ break; /* No subobjects for all other types */
}
/*
- * Now we can update the count in the main object. This can only
+ * Now we can update the count in the main object. This can only
* happen after we update the sub-objects in case this causes the
* main object to be deleted.
*/
@@ -556,7 +564,7 @@ acpi_ut_update_object_reference(union acpi_operand_object * object, u16 action)
void acpi_ut_add_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR("ut_add_reference", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_add_reference, object);
/* Ensure that we have a valid object */
@@ -589,11 +597,11 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
void acpi_ut_remove_reference(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR("ut_remove_reference", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_remove_reference, object);
/*
- * Allow a NULL pointer to be passed in, just ignore it. This saves
- * each caller from having to check. Also, ignore NS nodes.
+ * Allow a NULL pointer to be passed in, just ignore it. This saves
+ * each caller from having to check. Also, ignore NS nodes.
*
*/
if (!object ||
@@ -613,7 +621,7 @@ void acpi_ut_remove_reference(union acpi_operand_object *object)
/*
* Decrement the reference count, and only actually delete the object
- * if the reference count becomes 0. (Must also decrement the ref count
+ * if the reference count becomes 0. (Must also decrement the ref count
* of all subobjects!)
*/
(void)acpi_ut_update_object_reference(object, REF_DECREMENT);
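
The utdelete.c changes are mostly macro renames and comment tweaks, but the functions they touch implement the usual reference-count pattern: acpi_ut_add_reference increments, acpi_ut_remove_reference decrements and deletes the object when the count hits zero. Reduced to its bare bones (illustrative type, no subobject handling or overflow checks):

#include <stdlib.h>

struct ref_object {
	unsigned int reference_count;	/* starts at 1 on creation */
	/* payload ... */
};

static void add_reference(struct ref_object *object)
{
	if (object)
		object->reference_count++;
}

static void remove_reference(struct ref_object *object)
{
	if (!object)			/* NULL is tolerated, as in ACPICA */
		return;

	if (--object->reference_count == 0)
		free(object);		/* real code also drops subobject refs */
}
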
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index 106cc97cb4a..d6d7121583c 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -56,6 +56,34 @@ static acpi_status
acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
struct acpi_compatible_id *one_cid);
+/*
+ * Strings supported by the _OSI predefined (internal) method.
+ */
+static const char *acpi_interfaces_supported[] = {
+ /* Operating System Vendor Strings */
+
+ "Linux",
+ "Windows 2000",
+ "Windows 2001",
+ "Windows 2001 SP0",
+ "Windows 2001 SP1",
+ "Windows 2001 SP2",
+ "Windows 2001 SP3",
+ "Windows 2001 SP4",
+ "Windows 2001.1",
+ "Windows 2001.1 SP1", /* Added 03/2006 */
+ "Windows 2006", /* Added 03/2006 */
+
+ /* Feature Group Strings */
+
+ "Extended Address Space Descriptor"
+ /*
+ * All "optional" feature group strings (features that are implemented
+ * by the host) should be implemented in the host version of
+ * acpi_os_validate_interface and should not be added here.
+ */
+};
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_osi_implementation
@@ -64,18 +92,18 @@ acpi_ut_translate_one_cid(union acpi_operand_object *obj_desc,
*
* RETURN: Status
*
- * DESCRIPTION: Implementation of _OSI predefined control method
- * Supported = _OSI (String)
+ * DESCRIPTION: Implementation of the _OSI predefined control method
*
******************************************************************************/
acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
{
+ acpi_status status;
union acpi_operand_object *string_desc;
union acpi_operand_object *return_desc;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ut_osi_implementation");
+ ACPI_FUNCTION_TRACE(ut_osi_implementation);
/* Validate the string input argument */
@@ -84,28 +112,47 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(AE_TYPE);
}
- /* Create a return object (Default value = 0) */
+ /* Create a return object */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
- /* Compare input string to table of supported strings */
+ /* Default return value is SUPPORTED */
+
+ return_desc->integer.value = ACPI_UINT32_MAX;
+ walk_state->return_desc = return_desc;
+
+ /* Compare input string to static table of supported interfaces */
- for (i = 0; i < ACPI_NUM_OSI_STRINGS; i++) {
- if (!ACPI_STRCMP(string_desc->string.pointer,
- ACPI_CAST_PTR(char,
- acpi_gbl_valid_osi_strings[i])))
- {
- /* This string is supported */
+ for (i = 0; i < ACPI_ARRAY_LENGTH(acpi_interfaces_supported); i++) {
+ if (!ACPI_STRCMP
+ (string_desc->string.pointer,
+ acpi_interfaces_supported[i])) {
- return_desc->integer.value = 0xFFFFFFFF;
- break;
+ /* The interface is supported */
+
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
}
}
- walk_state->return_desc = return_desc;
+ /*
+ * Did not match any string in the static table; call the host OSL to
+ * check for a match with one of the optional strings (such as
+ * "Module Device", "3.0 Thermal Model", etc.)
+ */
+ status = acpi_os_validate_interface(string_desc->string.pointer);
+ if (ACPI_SUCCESS(status)) {
+
+ /* The interface is supported */
+
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+
+ /* The interface is not supported */
+
+ return_desc->integer.value = 0;
return_ACPI_STATUS(AE_CTRL_TERMINATE);
}
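
The reworked _OSI handler above first scans a static table of supported interface strings and only then asks the host OSL (acpi_os_validate_interface) about optional feature strings, returning "supported" if either check passes. Stripped of the operand-object plumbing, the control flow is a table scan plus a callback, roughly as in this hypothetical sketch:

#include <string.h>

static const char *supported_interfaces[] = {
	"Linux",
	"Windows 2000",
	"Windows 2006",
	"Extended Address Space Descriptor",
};

#define ARRAY_LENGTH(a)	(sizeof(a) / sizeof((a)[0]))

/* Host-specific check for optional feature strings; nonzero = supported */
typedef int (*validate_fn)(const char *interface);

static int osi_supported(const char *interface, validate_fn host_validate)
{
	size_t i;

	for (i = 0; i < ARRAY_LENGTH(supported_interfaces); i++) {
		if (strcmp(interface, supported_interfaces[i]) == 0)
			return 1;	/* found in the static table */
	}

	/* Not in the table - let the host OSL decide */
	return host_validate ? host_validate(interface) : 0;
}
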
@@ -134,19 +181,26 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
{
- struct acpi_parameter_info info;
+ struct acpi_evaluate_info *info;
acpi_status status;
u32 return_btype;
- ACPI_FUNCTION_TRACE("ut_evaluate_object");
+ ACPI_FUNCTION_TRACE(ut_evaluate_object);
- info.node = prefix_node;
- info.parameters = NULL;
- info.parameter_type = ACPI_PARAM_ARGS;
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = prefix_node;
+ info->pathname = path;
+ info->parameter_type = ACPI_PARAM_ARGS;
/* Evaluate the object/method */
- status = acpi_ns_evaluate_relative(path, &info);
+ status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -158,25 +212,25 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
prefix_node, path, status);
}
- return_ACPI_STATUS(status);
+ goto cleanup;
}
/* Did we get a return object? */
- if (!info.return_object) {
+ if (!info->return_object) {
if (expected_return_btypes) {
ACPI_ERROR_METHOD("No object was returned from",
prefix_node, path, AE_NOT_EXIST);
- return_ACPI_STATUS(AE_NOT_EXIST);
+ status = AE_NOT_EXIST;
}
- return_ACPI_STATUS(AE_OK);
+ goto cleanup;
}
/* Map the return object type to the bitmapped type */
- switch (ACPI_GET_OBJECT_TYPE(info.return_object)) {
+ switch (ACPI_GET_OBJECT_TYPE(info->return_object)) {
case ACPI_TYPE_INTEGER:
return_btype = ACPI_BTYPE_INTEGER;
break;
@@ -204,8 +258,8 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
* happen frequently if the "implicit return" feature is enabled.
* Just delete the return object and return AE_OK.
*/
- acpi_ut_remove_reference(info.return_object);
- return_ACPI_STATUS(AE_OK);
+ acpi_ut_remove_reference(info->return_object);
+ goto cleanup;
}
/* Is the return object one of the expected types? */
@@ -217,19 +271,23 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
ACPI_ERROR((AE_INFO,
"Type returned from %s was incorrect: %s, expected Btypes: %X",
path,
- acpi_ut_get_object_type_name(info.return_object),
+ acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
/* On error exit, we must delete the return object */
- acpi_ut_remove_reference(info.return_object);
- return_ACPI_STATUS(AE_TYPE);
+ acpi_ut_remove_reference(info->return_object);
+ status = AE_TYPE;
+ goto cleanup;
}
/* Object type is OK, return it */
- *return_desc = info.return_object;
- return_ACPI_STATUS(AE_OK);
+ *return_desc = info->return_object;
+
+ cleanup:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -257,7 +315,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_evaluate_numeric_object");
+ ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object);
status = acpi_ut_evaluate_object(device_node, object_name,
ACPI_BTYPE_INTEGER, &obj_desc);
@@ -333,7 +391,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_execute_HID");
+ ACPI_FUNCTION_TRACE(ut_execute_HID);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
@@ -343,6 +401,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
}
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Convert the Numeric HID to string */
acpi_ex_eisa_id_to_string((u32) obj_desc->integer.value,
@@ -436,7 +495,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
struct acpi_compatible_id_list *cid_list;
acpi_native_uint i;
- ACPI_FUNCTION_TRACE("ut_execute_CID");
+ ACPI_FUNCTION_TRACE(ut_execute_CID);
/* Evaluate the _CID method for this device */
@@ -459,7 +518,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
size = (((count - 1) * sizeof(struct acpi_compatible_id)) +
sizeof(struct acpi_compatible_id_list));
- cid_list = ACPI_MEM_CALLOCATE((acpi_size) size);
+ cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
if (!cid_list) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -479,6 +538,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
/* The _CID object can be either a single CID or a package (list) of CIDs */
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
/* Translate each package element */
for (i = 0; i < count; i++) {
@@ -499,7 +559,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node * device_node,
/* Cleanup on error */
if (ACPI_FAILURE(status)) {
- ACPI_MEM_FREE(cid_list);
+ ACPI_FREE(cid_list);
} else {
*return_cid_list = cid_list;
}
@@ -533,7 +593,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_execute_UID");
+ ACPI_FUNCTION_TRACE(ut_execute_UID);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
@@ -543,6 +603,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
}
if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
/* Convert the Numeric UID to string */
acpi_ex_unsigned_integer_to_string(obj_desc->integer.value,
@@ -582,7 +643,7 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
union acpi_operand_object *obj_desc;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_execute_STA");
+ ACPI_FUNCTION_TRACE(ut_execute_STA);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
ACPI_BTYPE_INTEGER, &obj_desc);
@@ -632,7 +693,7 @@ acpi_ut_execute_sxds(struct acpi_namespace_node *device_node, u8 * highest)
acpi_status status;
u32 i;
- ACPI_FUNCTION_TRACE("ut_execute_Sxds");
+ ACPI_FUNCTION_TRACE(ut_execute_sxds);
for (i = 0; i < 4; i++) {
highest[i] = 0xFF;
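
acpi_ut_evaluate_object now heap-allocates its acpi_evaluate_info block and routes every exit through a single cleanup label so the block is freed on all paths. That error-handling shape, in isolation and with hypothetical names, looks like this:

#include <stdlib.h>

struct eval_info {
	const char *pathname;
	void *return_object;
};

/* 'evaluate' stands in for the real evaluator; returns 0 on success */
static int evaluate_object(const char *pathname,
			   int (*evaluate)(struct eval_info *),
			   void **return_object)
{
	struct eval_info *info;
	int status;

	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;

	info->pathname = pathname;

	status = evaluate(info);
	if (status != 0)
		goto cleanup;	/* one exit point frees info on every path */

	*return_object = info->return_object;

cleanup:
	free(info);
	return status;
}
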
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index ffd13383a32..e5999c65c0b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -43,7 +43,6 @@
#define DEFINE_ACPI_GLOBALS
-#include <linux/module.h>
#include <acpi/acpi.h>
#include <acpi/acnamesp.h>
@@ -119,6 +118,7 @@ const char *acpi_format_exception(acpi_status status)
}
if (!exception) {
+
/* Exception code was not recognized */
ACPI_ERROR((AE_INFO,
@@ -143,12 +143,10 @@ const char *acpi_format_exception(acpi_status status)
/* Debug switch - level and trace mask */
u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
-EXPORT_SYMBOL(acpi_dbg_level);
/* Debug switch - layer (component) mask */
u32 acpi_dbg_layer = ACPI_COMPONENT_DEFAULT | ACPI_ALL_DRIVERS;
-EXPORT_SYMBOL(acpi_dbg_layer);
u32 acpi_gbl_nesting_level = 0;
/* Debugger globals */
@@ -183,28 +181,6 @@ const char *acpi_gbl_highest_dstate_names[4] = {
"_S4D"
};
-/*
- * Strings supported by the _OSI predefined (internal) method.
- * When adding strings, be sure to update ACPI_NUM_OSI_STRINGS.
- */
-const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS] = {
- /* Operating System Vendor Strings */
-
- "Linux",
- "Windows 2000",
- "Windows 2001",
- "Windows 2001.1",
- "Windows 2001 SP0",
- "Windows 2001 SP1",
- "Windows 2001 SP2",
- "Windows 2001 SP3",
- "Windows 2001 SP4",
-
- /* Feature Group Strings */
-
- "Extended Address Space Descriptor"
-};
-
/*******************************************************************************
*
* Namespace globals
@@ -317,9 +293,9 @@ char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
*
******************************************************************************/
-struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES];
+struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
-struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES] = {
+struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1] = {
/*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */
/* RSDP 0 */ {RSDP_NAME, RSDP_SIG, NULL, sizeof(RSDP_SIG) - 1,
@@ -467,7 +443,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
/* Region type decoding */
const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
-/*! [Begin] no source code translation (keep these ASL Keywords as-is) */
"SystemMemory",
"SystemIO",
"PCI_Config",
@@ -476,16 +451,15 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"CMOS",
"PCIBARTarget",
"DataTable"
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_region_name(u8 space_id)
{
if (space_id >= ACPI_USER_REGION_BEGIN) {
- return ("user_defined_region");
+ return ("UserDefinedRegion");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
- return ("invalid_space_id");
+ return ("InvalidSpaceId");
}
return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
@@ -506,20 +480,18 @@ char *acpi_ut_get_region_name(u8 space_id)
/* Event type decoding */
static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
-/*! [Begin] no source code translation (keep these strings as-is) */
"PM_Timer",
"GlobalLock",
"PowerButton",
"SleepButton",
"RealTimeClock",
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_event_name(u32 event_id)
{
if (event_id > ACPI_EVENT_MAX) {
- return ("invalid_event_iD");
+ return ("InvalidEventID");
}
return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
@@ -550,7 +522,6 @@ static const char acpi_gbl_bad_type[] = "UNDEFINED";
/* Printable names of the ACPI object types */
static const char *acpi_gbl_ns_type_names[] = {
-/*! [Begin] no source code translation (keep these strings as-is) */
/* 00 */ "Untyped",
/* 01 */ "Integer",
/* 02 */ "String",
@@ -582,7 +553,6 @@ static const char *acpi_gbl_ns_type_names[] = {
/* 28 */ "Extra",
/* 29 */ "Data",
/* 30 */ "Invalid"
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_type_name(acpi_object_type type)
@@ -635,14 +605,14 @@ char *acpi_ut_get_node_name(void *object)
/* Descriptor must be a namespace node */
- if (node->descriptor != ACPI_DESC_TYPE_NAMED) {
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
return ("####");
}
/* Name must be a valid ACPI name */
if (!acpi_ut_valid_acpi_name(node->name.integer)) {
- return ("????");
+ node->name.integer = acpi_ut_repair_name(node->name.integer);
}
/* Return the name */
@@ -665,7 +635,6 @@ char *acpi_ut_get_node_name(void *object)
/* Printable names of object descriptor types */
static const char *acpi_gbl_desc_type_names[] = {
-/*! [Begin] no source code translation (keep these ASL Keywords as-is) */
/* 00 */ "Invalid",
/* 01 */ "Cached",
/* 02 */ "State-Generic",
@@ -682,7 +651,6 @@ static const char *acpi_gbl_desc_type_names[] = {
/* 13 */ "Parser",
/* 14 */ "Operand",
/* 15 */ "Node"
-/*! [End] no source code translation !*/
};
char *acpi_ut_get_descriptor_name(void *object)
@@ -723,7 +691,7 @@ char *acpi_ut_get_descriptor_name(void *object)
char *acpi_ut_get_mutex_name(u32 mutex_id)
{
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return ("Invalid Mutex ID");
}
@@ -747,6 +715,7 @@ u8 acpi_ut_valid_object_type(acpi_object_type type)
{
if (type > ACPI_TYPE_LOCAL_MAX) {
+
/* Note: Assumes all TYPEs are contiguous (external/local) */
return (FALSE);
@@ -773,7 +742,7 @@ void acpi_ut_init_globals(void)
acpi_status status;
u32 i;
- ACPI_FUNCTION_TRACE("ut_init_globals");
+ ACPI_FUNCTION_TRACE(ut_init_globals);
/* Create all memory caches */
@@ -784,14 +753,14 @@ void acpi_ut_init_globals(void)
/* ACPI table structure */
- for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) {
+ for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
acpi_gbl_table_lists[i].next = NULL;
acpi_gbl_table_lists[i].count = 0;
}
/* Mutex locked flags */
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
acpi_gbl_mutex_info[i].mutex = NULL;
acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
acpi_gbl_mutex_info[i].use_count = 0;
@@ -856,7 +825,7 @@ void acpi_ut_init_globals(void)
acpi_gbl_root_node = NULL;
acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
- acpi_gbl_root_node_struct.descriptor = ACPI_DESC_TYPE_NAMED;
+ acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
acpi_gbl_root_node_struct.child = NULL;
acpi_gbl_root_node_struct.peer = NULL;
@@ -869,3 +838,6 @@ void acpi_ut_init_globals(void)
return_VOID;
}
+
+ACPI_EXPORT_SYMBOL(acpi_dbg_level)
+ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
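
utglobal.c keeps its decode tables (region types, event names, object type names) behind small accessors that bounds-check the index before touching the table, now returning spelled-out strings such as "InvalidSpaceId" for out-of-range IDs. The pattern, stripped down with illustrative table contents and cutoffs:

static const char *region_types[] = {
	"SystemMemory",
	"SystemIO",
	"PCI_Config",
	"EmbeddedControl",
	"SMBus",
	"CMOS",
	"PCIBARTarget",
	"DataTable",
};

#define NUM_REGION_TYPES	(sizeof(region_types) / sizeof(region_types[0]))
#define USER_REGION_BEGIN	0x80	/* illustrative cutoff */

static const char *get_region_name(unsigned int space_id)
{
	if (space_id >= USER_REGION_BEGIN)
		return "UserDefinedRegion";
	if (space_id >= NUM_REGION_TYPES)
		return "InvalidSpaceId";

	return region_types[space_id];
}
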
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
index ba771b4f39b..ff76055eb7d 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/utilities/utinit.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("utinit")
/* Local prototypes */
static void
-acpi_ut_fadt_register_error(char *register_name, u32 value, acpi_size offset);
+acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset);
static void acpi_ut_terminate(void);
@@ -69,12 +69,12 @@ static void acpi_ut_terminate(void);
******************************************************************************/
static void
-acpi_ut_fadt_register_error(char *register_name, u32 value, acpi_size offset)
+acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset)
{
ACPI_WARNING((AE_INFO,
"Invalid FADT value %s=%X at offset %X FADT=%p",
- register_name, value, (u32) offset, acpi_gbl_FADT));
+ register_name, value, offset, acpi_gbl_FADT));
}
/******************************************************************************
@@ -176,7 +176,7 @@ static void acpi_ut_terminate(void)
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
- ACPI_FUNCTION_TRACE("ut_terminate");
+ ACPI_FUNCTION_TRACE(ut_terminate);
/* Free global tables, etc. */
/* Free global GPE blocks and related info structures */
@@ -186,14 +186,14 @@ static void acpi_ut_terminate(void)
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
next_gpe_block = gpe_block->next;
- ACPI_MEM_FREE(gpe_block->event_info);
- ACPI_MEM_FREE(gpe_block->register_info);
- ACPI_MEM_FREE(gpe_block);
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block);
gpe_block = next_gpe_block;
}
next_gpe_xrupt_info = gpe_xrupt_info->next;
- ACPI_MEM_FREE(gpe_xrupt_info);
+ ACPI_FREE(gpe_xrupt_info);
gpe_xrupt_info = next_gpe_xrupt_info;
}
@@ -216,7 +216,7 @@ static void acpi_ut_terminate(void)
void acpi_ut_subsystem_shutdown(void)
{
- ACPI_FUNCTION_TRACE("ut_subsystem_shutdown");
+ ACPI_FUNCTION_TRACE(ut_subsystem_shutdown);
/* Just exit if subsystem is already shutdown */
@@ -228,6 +228,7 @@ void acpi_ut_subsystem_shutdown(void)
/* Subsystem appears active, go ahead and shut it down */
acpi_gbl_shutdown = TRUE;
+ acpi_gbl_startup_flags = 0;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
/* Close the acpi_event Handling */
@@ -245,12 +246,5 @@ void acpi_ut_subsystem_shutdown(void)
/* Purge the local caches */
(void)acpi_ut_delete_caches();
-
- /* Debug only - display leftover memory allocation, if any */
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
- acpi_ut_dump_allocations(ACPI_UINT32_MAX, NULL);
-#endif
-
return_VOID;
}
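
acpi_ut_terminate above walks two nested singly linked lists (GPE interrupt blocks and the GPE blocks hanging off each one), saving the next pointer before freeing the current node. The single-list version of that idiom:

#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

/* Free an entire singly linked list; next is saved before each free() */
static void free_list(struct node *head)
{
	struct node *next;

	while (head) {
		next = head->next;
		free(head);
		head = next;
	}
}
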
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
index 4a3360484e7..19d74bedce2 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/utilities/utmath.c
@@ -77,7 +77,7 @@ acpi_ut_short_divide(acpi_integer dividend,
union uint64_overlay quotient;
u32 remainder32;
- ACPI_FUNCTION_TRACE("ut_short_divide");
+ ACPI_FUNCTION_TRACE(ut_short_divide);
/* Always check for a zero divisor */
@@ -139,7 +139,7 @@ acpi_ut_divide(acpi_integer in_dividend,
union uint64_overlay partial2;
union uint64_overlay partial3;
- ACPI_FUNCTION_TRACE("ut_divide");
+ ACPI_FUNCTION_TRACE(ut_divide);
/* Always check for a zero divisor */
@@ -261,7 +261,7 @@ acpi_ut_short_divide(acpi_integer in_dividend,
acpi_integer * out_quotient, u32 * out_remainder)
{
- ACPI_FUNCTION_TRACE("ut_short_divide");
+ ACPI_FUNCTION_TRACE(ut_short_divide);
/* Always check for a zero divisor */
@@ -287,7 +287,7 @@ acpi_ut_divide(acpi_integer in_dividend,
acpi_integer in_divisor,
acpi_integer * out_quotient, acpi_integer * out_remainder)
{
- ACPI_FUNCTION_TRACE("ut_divide");
+ ACPI_FUNCTION_TRACE(ut_divide);
/* Always check for a zero divisor */
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 7364f5f8c9c..5c75d35ad1c 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -49,6 +49,33 @@ ACPI_MODULE_NAME("utmisc")
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_is_aml_table
+ *
+ * PARAMETERS: Table - An ACPI table
+ *
+ * RETURN: TRUE if table contains executable AML; FALSE otherwise
+ *
+ * DESCRIPTION: Check ACPI Signature for a table that contains AML code.
+ * Currently, these are DSDT,SSDT,PSDT. All other table types are
+ * data tables that do not contain AML code.
+ *
+ ******************************************************************************/
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
+{
+
+ /* These are the only tables that contain executable AML code */
+
+ if (ACPI_COMPARE_NAME(table->signature, DSDT_SIG) ||
+ ACPI_COMPARE_NAME(table->signature, PSDT_SIG) ||
+ ACPI_COMPARE_NAME(table->signature, SSDT_SIG)) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_allocate_owner_id
*
* PARAMETERS: owner_id - Where the new owner ID is returned
@@ -60,6 +87,7 @@ ACPI_MODULE_NAME("utmisc")
* when the method exits or the table is unloaded.
*
******************************************************************************/
+
acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
{
acpi_native_uint i;
@@ -67,7 +95,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
acpi_native_uint k;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_allocate_owner_id");
+ ACPI_FUNCTION_TRACE(ut_allocate_owner_id);
/* Guard against multiple allocations of ID to the same location */
@@ -97,6 +125,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+
/* There are no free IDs in this mask */
break;
@@ -123,7 +152,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
(acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
- "Allocated owner_id: %2.2X\n",
+ "Allocated OwnerId: %2.2X\n",
(unsigned int)*owner_id));
goto exit;
}
@@ -144,7 +173,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
*/
status = AE_OWNER_ID_LIMIT;
ACPI_ERROR((AE_INFO,
- "Could not allocate new owner_id (255 max), AE_OWNER_ID_LIMIT"));
+ "Could not allocate new OwnerId (255 max), AE_OWNER_ID_LIMIT"));
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
@@ -172,7 +201,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_native_uint index;
u32 bit;
- ACPI_FUNCTION_TRACE_U32("ut_release_owner_id", owner_id);
+ ACPI_FUNCTION_TRACE_U32(ut_release_owner_id, owner_id);
/* Always clear the input owner_id (zero is an invalid ID) */
@@ -181,7 +210,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_id */
if (owner_id == 0) {
- ACPI_ERROR((AE_INFO, "Invalid owner_id: %2.2X", owner_id));
+ ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id));
return_VOID;
}
@@ -207,7 +236,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
acpi_gbl_owner_id_mask[index] ^= bit;
} else {
ACPI_ERROR((AE_INFO,
- "Release of non-allocated owner_id: %2.2X",
+ "Release of non-allocated OwnerId: %2.2X",
owner_id + 1));
}
@@ -273,6 +302,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
acpi_os_printf("\"");
for (i = 0; string[i] && (i < max_length); i++) {
+
/* Escape sequences */
switch (string[i]) {
@@ -461,12 +491,47 @@ acpi_ut_display_init_pathname(u8 type,
}
acpi_os_printf("\n");
- ACPI_MEM_FREE(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
}
#endif
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_valid_acpi_char
+ *
+ * PARAMETERS: Char - The character to be examined
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_valid_acpi_name
*
* PARAMETERS: Name - The name to be examined
@@ -482,19 +547,13 @@ acpi_ut_display_init_pathname(u8 type,
u8 acpi_ut_valid_acpi_name(u32 name)
{
- char *name_ptr = (char *)&name;
- char character;
acpi_native_uint i;
ACPI_FUNCTION_ENTRY();
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- character = *name_ptr;
- name_ptr++;
-
- if (!((character == '_') ||
- (character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9'))) {
+ if (!acpi_ut_valid_acpi_char
+ ((ACPI_CAST_PTR(char, &name))[i], i)) {
return (FALSE);
}
}
@@ -504,24 +563,37 @@ u8 acpi_ut_valid_acpi_name(u32 name)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_valid_acpi_character
+ * FUNCTION: acpi_ut_repair_name
*
- * PARAMETERS: Character - The character to be examined
+ * PARAMETERS: Name - The ACPI name to be repaired
*
- * RETURN: 1 if Character may appear in a name, else 0
+ * RETURN: Repaired version of the name
*
- * DESCRIPTION: Check for a printable character
+ * DESCRIPTION: Repair an ACPI name: Change invalid characters to '*' and
+ * return the new name.
*
******************************************************************************/
-u8 acpi_ut_valid_acpi_character(char character)
+acpi_name acpi_ut_repair_name(acpi_name name)
{
+ char *name_ptr = ACPI_CAST_PTR(char, &name);
+ char new_name[ACPI_NAME_SIZE];
+ acpi_native_uint i;
- ACPI_FUNCTION_ENTRY();
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ new_name[i] = name_ptr[i];
- return ((u8) ((character == '_') ||
- (character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9')));
+ /*
+ * Replace a bad character with something printable, yet technically
+ * still invalid. This prevents any collisions with existing "good"
+ * names in the namespace.
+ */
+ if (!acpi_ut_valid_acpi_char(name_ptr[i], i)) {
+ new_name[i] = '*';
+ }
+ }
+
+ return (*ACPI_CAST_PTR(u32, new_name));
}
/*******************************************************************************
@@ -529,7 +601,8 @@ u8 acpi_ut_valid_acpi_character(char character)
* FUNCTION: acpi_ut_strtoul64
*
* PARAMETERS: String - Null terminated string
- * Base - Radix of the string: 10, 16, or ACPI_ANY_BASE
+ * Base - Radix of the string: 16 or ACPI_ANY_BASE;
+ * ACPI_ANY_BASE means 'on behalf of to_integer'
* ret_integer - Where the converted integer is returned
*
* RETURN: Status and Converted value
@@ -545,16 +618,17 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
u32 this_digit = 0;
acpi_integer return_value = 0;
acpi_integer quotient;
+ acpi_integer dividend;
+ u32 to_integer_op = (base == ACPI_ANY_BASE);
+ u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+ u8 valid_digits = 0;
+ u8 sign_of0x = 0;
+ u8 term = 0;
- ACPI_FUNCTION_TRACE("ut_stroul64");
-
- if ((!string) || !(*string)) {
- goto error_exit;
- }
+ ACPI_FUNCTION_TRACE(ut_strtoul64);
switch (base) {
case ACPI_ANY_BASE:
- case 10:
case 16:
break;
@@ -563,76 +637,110 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ if (!string) {
+ goto error_exit;
+ }
+
/* Skip over any white space in the buffer */
- while (ACPI_IS_SPACE(*string) || *string == '\t') {
+ while ((*string) && (ACPI_IS_SPACE(*string) || *string == '\t')) {
string++;
}
- /*
- * If the input parameter Base is zero, then we need to
- * determine if it is decimal or hexadecimal:
- */
- if (base == 0) {
+ if (to_integer_op) {
+ /*
+ * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
+ * We need to determine if it is decimal or hexadecimal.
+ */
if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
+ sign_of0x = 1;
base = 16;
+
+ /* Skip over the leading '0x' */
string += 2;
} else {
base = 10;
}
}
- /*
- * For hexadecimal base, skip over the leading
- * 0 or 0x, if they are present.
- */
- if ((base == 16) &&
- (*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
- string += 2;
+ /* Any string left? Check that '0x' is not followed by white space. */
+
+ if (!(*string) || ACPI_IS_SPACE(*string) || *string == '\t') {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ goto all_done;
+ }
}
- /* Any string left? */
+ dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
- if (!(*string)) {
- goto error_exit;
- }
+ /* At least one character in the string here */
/* Main loop: convert the string to a 64-bit integer */
while (*string) {
if (ACPI_IS_DIGIT(*string)) {
+
/* Convert ASCII 0-9 to Decimal value */
this_digit = ((u8) * string) - '0';
- } else {
- if (base == 10) {
- /* Digit is out of range */
+ } else if (base == 10) {
- goto error_exit;
- }
+ /* Digit is out of range; possible in to_integer case only */
+ term = 1;
+ } else {
this_digit = (u8) ACPI_TOUPPER(*string);
if (ACPI_IS_XDIGIT((char)this_digit)) {
+
/* Convert ASCII Hex char to value */
this_digit = this_digit - 'A' + 10;
} else {
- /*
- * We allow non-hex chars, just stop now, same as end-of-string.
- * See ACPI spec, string-to-integer conversion.
- */
+ term = 1;
+ }
+ }
+
+ if (term) {
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
break;
}
+ } else if ((valid_digits == 0) && (this_digit == 0)
+ && !sign_of0x) {
+
+ /* Skip zeros */
+ string++;
+ continue;
+ }
+
+ valid_digits++;
+
+ if (sign_of0x
+ && ((valid_digits > 16)
+ || ((valid_digits > 8) && mode32))) {
+ /*
+ * This is the implicit (to_integer) conversion case with a '0x'
+ * prefix: too many digits for the integer width is an error here.
+ * Plain string-to-integer conversion has no such restriction,
+ * see the ACPI spec.
+ */
+ goto error_exit;
}
/* Divide the digit into the correct position */
(void)
- acpi_ut_short_divide((ACPI_INTEGER_MAX -
- (acpi_integer) this_digit), base,
- &quotient, NULL);
+ acpi_ut_short_divide((dividend - (acpi_integer) this_digit),
+ base, &quotient, NULL);
+
if (return_value > quotient) {
- goto error_exit;
+ if (to_integer_op) {
+ goto error_exit;
+ } else {
+ break;
+ }
}
return_value *= base;
@@ -642,6 +750,8 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
/* All done, normal exit */
+ all_done:
+
*ret_integer = return_value;
return_ACPI_STATUS(AE_OK);
@@ -719,7 +829,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
u32 this_index;
union acpi_operand_object *this_source_obj;
- ACPI_FUNCTION_TRACE("ut_walk_package_tree");
+ ACPI_FUNCTION_TRACE(ut_walk_package_tree);
state = acpi_ut_create_pkg_state(source_object, target_object, 0);
if (!state) {
@@ -727,6 +837,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
}
while (state) {
+
/* Get one element of the package */
this_index = state->pkg.index;
@@ -814,31 +925,6 @@ acpi_ut_walk_package_tree(union acpi_operand_object * source_object,
/*******************************************************************************
*
- * FUNCTION: acpi_ut_generate_checksum
- *
- * PARAMETERS: Buffer - Buffer to be scanned
- * Length - number of bytes to examine
- *
- * RETURN: The generated checksum
- *
- * DESCRIPTION: Generate a checksum on a raw buffer
- *
- ******************************************************************************/
-
-u8 acpi_ut_generate_checksum(u8 * buffer, u32 length)
-{
- u32 i;
- signed char sum = 0;
-
- for (i = 0; i < length; i++) {
- sum = (signed char)(sum + buffer[i]);
- }
-
- return ((u8) (0 - sum));
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_error, acpi_ut_warning, acpi_ut_info
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -900,36 +986,3 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
acpi_os_vprintf(format, args);
acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_report_error, Warning, Info
- *
- * PARAMETERS: module_name - Caller's module name (for error output)
- * line_number - Caller's line number (for error output)
- *
- * RETURN: None
- *
- * DESCRIPTION: Print error message
- *
- * Note: Legacy only, should be removed when no longer used by drivers.
- *
- ******************************************************************************/
-
-void acpi_ut_report_error(char *module_name, u32 line_number)
-{
-
- acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
-}
-
-void acpi_ut_report_warning(char *module_name, u32 line_number)
-{
-
- acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
-}
-
-void acpi_ut_report_info(char *module_name, u32 line_number)
-{
-
- acpi_os_printf("ACPI (%s-%04d): ", module_name, line_number);
-}
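
The reworked acpi_ut_strtoul64 above merges the implicit (to_integer) and explicit base-16 paths into one loop: skip leading whitespace, detect an optional '0x' prefix in the implicit case, skip leading zeros, then accumulate digits while checking each step against a width-dependent limit. As a rough standalone model of those rules (plain C with illustrative names, omitting the stricter error handling the to_integer path applies), one could write:

/* Standalone model of the conversion rules; not the ACPICA implementation. */
#include <ctype.h>
#include <stdint.h>

static int model_strtoul64(const char *s, unsigned int base, int mode32,
                           uint64_t *out)
{
	uint64_t value = 0;
	uint64_t limit = mode32 ? UINT32_MAX : UINT64_MAX;
	int implicit = (base == 0);          /* stands in for ACPI_ANY_BASE */

	if (!s)
		return -1;

	while (*s == ' ' || *s == '\t')      /* skip leading white space */
		s++;

	if (implicit) {
		if (s[0] == '0' && tolower((unsigned char)s[1]) == 'x') {
			base = 16;           /* implicit hex, skip the "0x" */
			s += 2;
		} else {
			base = 10;
		}
	}
	if (!*s)
		return -1;                   /* nothing left to convert */

	for (; *s; s++) {
		unsigned int digit;

		if (isdigit((unsigned char)*s))
			digit = (unsigned int)(*s - '0');
		else if (base == 16 && isxdigit((unsigned char)*s))
			digit = (unsigned int)(toupper((unsigned char)*s) - 'A' + 10);
		else
			break;               /* first non-digit terminates */

		if (value > (limit - digit) / base)
			return -1;           /* next step would overflow the width */
		value = value * base + digit;
	}

	*out = value;
	return 0;
}

With these rules, model_strtoul64("0x1A", 0, 0, &v) yields 26, while a hex value wider than 32 bits parsed with mode32 set fails the overflow check.
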
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index 45a7244df92..25eb34369af 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -68,19 +68,26 @@ acpi_status acpi_ut_mutex_initialize(void)
u32 i;
acpi_status status;
- ACPI_FUNCTION_TRACE("ut_mutex_initialize");
+ ACPI_FUNCTION_TRACE(ut_mutex_initialize);
/*
* Create each of the predefined mutex objects
*/
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
status = acpi_ut_create_mutex(i);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
+ /* Create the spinlocks for use at interrupt level */
+
status = acpi_os_create_lock(&acpi_gbl_gpe_lock);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_os_create_lock(&acpi_gbl_hardware_lock);
return_ACPI_STATUS(status);
}
@@ -100,16 +107,19 @@ void acpi_ut_mutex_terminate(void)
{
u32 i;
- ACPI_FUNCTION_TRACE("ut_mutex_terminate");
+ ACPI_FUNCTION_TRACE(ut_mutex_terminate);
/*
* Delete each predefined mutex object
*/
- for (i = 0; i < NUM_MUTEX; i++) {
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
(void)acpi_ut_delete_mutex(i);
}
+ /* Delete the spinlocks */
+
acpi_os_delete_lock(acpi_gbl_gpe_lock);
+ acpi_os_delete_lock(acpi_gbl_hardware_lock);
return_VOID;
}
@@ -129,9 +139,9 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_U32("ut_create_mutex", mutex_id);
+ ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -163,9 +173,9 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status;
- ACPI_FUNCTION_TRACE_U32("ut_delete_mutex", mutex_id);
+ ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -192,11 +202,11 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status;
- u32 this_thread_id;
+ acpi_thread_id this_thread_id;
- ACPI_FUNCTION_NAME("ut_acquire_mutex");
+ ACPI_FUNCTION_NAME(ut_acquire_mutex);
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
@@ -213,7 +223,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
* the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
- for (i = mutex_id; i < MAX_MUTEX; i++) {
+ for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
ACPI_ERROR((AE_INFO,
@@ -275,16 +285,16 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
acpi_status status;
- u32 this_thread_id;
+ acpi_thread_id this_thread_id;
- ACPI_FUNCTION_NAME("ut_release_mutex");
+ ACPI_FUNCTION_NAME(ut_release_mutex);
this_thread_id = acpi_os_get_thread_id();
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %X releasing Mutex [%s]\n", this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
- if (mutex_id > MAX_MUTEX) {
+ if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER);
}
@@ -309,7 +319,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code.
*/
- for (i = mutex_id; i < MAX_MUTEX; i++) {
+ for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) {
continue;
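
The ordering check referenced above (the loop over IDs greater than or equal to the one being acquired) enforces that a thread takes the predefined mutexes in ascending ID order. A simplified illustration of that rule, with a made-up owner table rather than acpi_gbl_mutex_info, might be:

/* Simplified lock-ordering check; the owner table and names are stand-ins,
 * not the ACPI mutex info array. A thread id of 0 means "not held". */
#include <stdio.h>

#define NUM_LOCKS 8

static unsigned int lock_owner[NUM_LOCKS];   /* thread currently holding each lock */

static int check_lock_ordering(unsigned int lock_id, unsigned int thread_id)
{
	unsigned int i;

	/* A thread must not already hold this lock or any lock with a higher ID */
	for (i = lock_id; i < NUM_LOCKS; i++) {
		if (lock_owner[i] != thread_id)
			continue;
		if (i == lock_id)
			fprintf(stderr, "thread %u already holds lock %u\n",
				thread_id, i);
		else
			fprintf(stderr, "thread %u holds lock %u, cannot take %u\n",
				thread_id, i, lock_id);
		return -1;                   /* ordering rule violated */
	}

	lock_owner[lock_id] = thread_id;     /* record the new owner */
	return 0;
}
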
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index 7ee2d1d9807..ba7d8ac702d 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -92,7 +92,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name,
union acpi_operand_object *object;
union acpi_operand_object *second_object;
- ACPI_FUNCTION_TRACE_STR("ut_create_internal_object_dbg",
+ ACPI_FUNCTION_TRACE_STR(ut_create_internal_object_dbg,
acpi_ut_get_type_name(type));
/* Allocate the raw object descriptor */
@@ -161,7 +161,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
union acpi_operand_object *buffer_desc;
u8 *buffer = NULL;
- ACPI_FUNCTION_TRACE_U32("ut_create_buffer_object", buffer_size);
+ ACPI_FUNCTION_TRACE_U32(ut_create_buffer_object, buffer_size);
/* Create a new Buffer object */
@@ -173,9 +173,10 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
/* Create an actual buffer only if size > 0 */
if (buffer_size > 0) {
+
/* Allocate the actual buffer */
- buffer = ACPI_MEM_CALLOCATE(buffer_size);
+ buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
if (!buffer) {
ACPI_ERROR((AE_INFO, "Could not allocate size %X",
(u32) buffer_size));
@@ -214,7 +215,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
union acpi_operand_object *string_desc;
char *string;
- ACPI_FUNCTION_TRACE_U32("ut_create_string_object", string_size);
+ ACPI_FUNCTION_TRACE_U32(ut_create_string_object, string_size);
/* Create a new String object */
@@ -227,7 +228,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
* Allocate the actual string buffer -- (Size + 1) for NULL terminator.
* NOTE: Zero-length strings are NULL terminated
*/
- string = ACPI_MEM_CALLOCATE(string_size + 1);
+ string = ACPI_ALLOCATE_ZEROED(string_size + 1);
if (!string) {
ACPI_ERROR((AE_INFO, "Could not allocate size %X",
(u32) string_size));
@@ -260,7 +261,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
u8 acpi_ut_valid_internal_object(void *object)
{
- ACPI_FUNCTION_NAME("ut_valid_internal_object");
+ ACPI_FUNCTION_NAME(ut_valid_internal_object);
/* Check for a null pointer */
@@ -308,7 +309,7 @@ void *acpi_ut_allocate_object_desc_dbg(char *module_name,
{
union acpi_operand_object *object;
- ACPI_FUNCTION_TRACE("ut_allocate_object_desc_dbg");
+ ACPI_FUNCTION_TRACE(ut_allocate_object_desc_dbg);
object = acpi_os_acquire_object(acpi_gbl_operand_cache);
if (!object) {
@@ -319,6 +320,7 @@ void *acpi_ut_allocate_object_desc_dbg(char *module_name,
}
/* Mark the descriptor type */
+
memset(object, 0, sizeof(union acpi_operand_object));
ACPI_SET_DESCRIPTOR_TYPE(object, ACPI_DESC_TYPE_OPERAND);
@@ -342,7 +344,7 @@ void *acpi_ut_allocate_object_desc_dbg(char *module_name,
void acpi_ut_delete_object_desc(union acpi_operand_object *object)
{
- ACPI_FUNCTION_TRACE_PTR("ut_delete_object_desc", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
/* Object must be an union acpi_operand_object */
@@ -381,7 +383,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
acpi_size length;
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE_PTR("ut_get_simple_object_size", internal_object);
+ ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
/*
* Handle a null object (Could be a uninitialized package
@@ -397,6 +399,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
length = sizeof(union acpi_object);
if (ACPI_GET_DESCRIPTOR_TYPE(internal_object) == ACPI_DESC_TYPE_NAMED) {
+
/* Object is a named object (reference), just return the length */
*obj_length = ACPI_ROUND_UP_TO_NATIVE_WORD(length);
@@ -559,7 +562,7 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
acpi_status status;
struct acpi_pkg_info info;
- ACPI_FUNCTION_TRACE_PTR("ut_get_package_object_size", internal_object);
+ ACPI_FUNCTION_TRACE_PTR(ut_get_package_object_size, internal_object);
info.length = 0;
info.object_space = 0;
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index 16461317113..5a2de92831d 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -45,113 +45,113 @@
#include <acpi/amlresrc.h>
#define _COMPONENT ACPI_UTILITIES
-ACPI_MODULE_NAME("utmisc")
+ACPI_MODULE_NAME("utresrc")
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
* Used by both the disassembler and the debugger resource dump routines
*/
-const char *acpi_gbl_BMdecode[2] = {
- "not_bus_master",
- "bus_master"
+const char *acpi_gbl_bm_decode[] = {
+ "NotBusMaster",
+ "BusMaster"
};
-const char *acpi_gbl_config_decode[4] = {
+const char *acpi_gbl_config_decode[] = {
"0 - Good Configuration",
"1 - Acceptable Configuration",
"2 - Suboptimal Configuration",
"3 - ***Invalid Configuration***",
};
-const char *acpi_gbl_consume_decode[2] = {
- "resource_producer",
- "resource_consumer"
+const char *acpi_gbl_consume_decode[] = {
+ "ResourceProducer",
+ "ResourceConsumer"
};
-const char *acpi_gbl_DECdecode[2] = {
- "pos_decode",
- "sub_decode"
+const char *acpi_gbl_dec_decode[] = {
+ "PosDecode",
+ "SubDecode"
};
-const char *acpi_gbl_HEdecode[2] = {
+const char *acpi_gbl_he_decode[] = {
"Level",
"Edge"
};
-const char *acpi_gbl_io_decode[2] = {
+const char *acpi_gbl_io_decode[] = {
"Decode10",
"Decode16"
};
-const char *acpi_gbl_LLdecode[2] = {
- "active_high",
- "active_low"
+const char *acpi_gbl_ll_decode[] = {
+ "ActiveHigh",
+ "ActiveLow"
};
-const char *acpi_gbl_max_decode[2] = {
- "max_not_fixed",
- "max_fixed"
+const char *acpi_gbl_max_decode[] = {
+ "MaxNotFixed",
+ "MaxFixed"
};
-const char *acpi_gbl_MEMdecode[4] = {
- "non_cacheable",
+const char *acpi_gbl_mem_decode[] = {
+ "NonCacheable",
"Cacheable",
- "write_combining",
+ "WriteCombining",
"Prefetchable"
};
-const char *acpi_gbl_min_decode[2] = {
- "min_not_fixed",
- "min_fixed"
+const char *acpi_gbl_min_decode[] = {
+ "MinNotFixed",
+ "MinFixed"
};
-const char *acpi_gbl_MTPdecode[4] = {
- "address_range_memory",
- "address_range_reserved",
- "address_range_aCPI",
- "address_range_nVS"
+const char *acpi_gbl_mtp_decode[] = {
+ "AddressRangeMemory",
+ "AddressRangeReserved",
+ "AddressRangeACPI",
+ "AddressRangeNVS"
};
-const char *acpi_gbl_RNGdecode[4] = {
- "invalid_ranges",
- "non_iSAonly_ranges",
- "ISAonly_ranges",
- "entire_range"
+const char *acpi_gbl_rng_decode[] = {
+ "InvalidRanges",
+ "NonISAOnlyRanges",
+ "ISAOnlyRanges",
+ "EntireRange"
};
-const char *acpi_gbl_RWdecode[2] = {
- "read_only",
- "read_write"
+const char *acpi_gbl_rw_decode[] = {
+ "ReadOnly",
+ "ReadWrite"
};
-const char *acpi_gbl_SHRdecode[2] = {
+const char *acpi_gbl_shr_decode[] = {
"Exclusive",
"Shared"
};
-const char *acpi_gbl_SIZdecode[4] = {
+const char *acpi_gbl_siz_decode[] = {
"Transfer8",
"Transfer8_16",
"Transfer16",
- "invalid_size"
+ "InvalidSize"
};
-const char *acpi_gbl_TRSdecode[2] = {
- "dense_translation",
- "sparse_translation"
+const char *acpi_gbl_trs_decode[] = {
+ "DenseTranslation",
+ "SparseTranslation"
};
-const char *acpi_gbl_TTPdecode[2] = {
- "type_static",
- "type_translation"
+const char *acpi_gbl_ttp_decode[] = {
+ "TypeStatic",
+ "TypeTranslation"
};
-const char *acpi_gbl_TYPdecode[4] = {
+const char *acpi_gbl_typ_decode[] = {
"Compatibility",
- "type_a",
- "type_b",
- "type_f"
+ "TypeA",
+ "TypeB",
+ "TypeF"
};
#endif
@@ -240,6 +240,104 @@ static const u8 acpi_gbl_resource_types[] = {
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_walk_aml_resources
+ *
+ * PARAMETERS: Aml - Pointer to the raw AML resource template
+ * aml_length - Length of the entire template
+ * user_function - Called once for each descriptor found. If
+ * NULL, a pointer to the end_tag is returned
+ * Context - Passed to user_function
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk a raw AML resource list (buffer). User function called
+ * once for each resource found.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+ acpi_size aml_length,
+ acpi_walk_aml_callback user_function, void **context)
+{
+ acpi_status status;
+ u8 *end_aml;
+ u8 resource_index;
+ u32 length;
+ u32 offset = 0;
+
+ ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
+
+ /* The absolute minimum resource template is one end_tag descriptor */
+
+ if (aml_length < sizeof(struct aml_resource_end_tag)) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
+ /* Point to the end of the resource template buffer */
+
+ end_aml = aml + aml_length;
+
+ /* Walk the byte list, abort on any invalid descriptor type or length */
+
+ while (aml < end_aml) {
+
+ /* Validate the Resource Type and Resource Length */
+
+ status = acpi_ut_validate_resource(aml, &resource_index);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Get the length of this descriptor */
+
+ length = acpi_ut_get_descriptor_length(aml);
+
+ /* Invoke the user function */
+
+ if (user_function) {
+ status =
+ user_function(aml, length, offset, resource_index,
+ context);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* An end_tag descriptor terminates this resource template */
+
+ if (acpi_ut_get_resource_type(aml) ==
+ ACPI_RESOURCE_NAME_END_TAG) {
+ /*
+ * There must be at least one more byte in the buffer for
+ * the 2nd byte of the end_tag
+ */
+ if ((aml + 1) >= end_aml) {
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ }
+
+ /* Return the pointer to the end_tag if requested */
+
+ if (!user_function) {
+ *context = aml;
+ }
+
+ /* Normal exit */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ aml += length;
+ offset += length;
+ }
+
+ /* Did not find an end_tag descriptor */
+
+ return (AE_AML_NO_RESOURCE_END_TAG);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_validate_resource
*
* PARAMETERS: Aml - Pointer to the raw AML resource descriptor
@@ -273,6 +371,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
* Examine the large/small bit in the resource header
*/
if (resource_type & ACPI_RESOURCE_NAME_LARGE) {
+
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
@@ -376,6 +475,7 @@ u8 acpi_ut_get_resource_type(void *aml)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
/* Large Resource Type -- bits 6:0 contain the name */
return (ACPI_GET8(aml));
@@ -411,6 +511,7 @@ u16 acpi_ut_get_resource_length(void *aml)
* Examine the large/small bit in the resource header
*/
if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
+
/* Large Resource type -- bytes 1-2 contain the 16-bit length */
ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));
@@ -495,60 +596,21 @@ acpi_ut_get_resource_end_tag(union acpi_operand_object * obj_desc,
u8 ** end_tag)
{
acpi_status status;
- u8 *aml;
- u8 *end_aml;
-
- ACPI_FUNCTION_TRACE("ut_get_resource_end_tag");
- /* Get start and end pointers */
-
- aml = obj_desc->buffer.pointer;
- end_aml = aml + obj_desc->buffer.length;
+ ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);
/* Allow a buffer length of zero */
if (!obj_desc->buffer.length) {
- *end_tag = aml;
+ *end_tag = obj_desc->buffer.pointer;
return_ACPI_STATUS(AE_OK);
}
- /* Walk the resource template, one descriptor per iteration */
-
- while (aml < end_aml) {
- /* Validate the Resource Type and Resource Length */
-
- status = acpi_ut_validate_resource(aml, NULL);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* end_tag resource indicates the end of the resource template */
-
- if (acpi_ut_get_resource_type(aml) ==
- ACPI_RESOURCE_NAME_END_TAG) {
- /*
- * There must be at least one more byte in the buffer for
- * the 2nd byte of the end_tag
- */
- if ((aml + 1) >= end_aml) {
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
- }
-
- /* Return the pointer to the end_tag */
-
- *end_tag = aml;
- return_ACPI_STATUS(AE_OK);
- }
-
- /*
- * Point to the next resource descriptor in the AML buffer. The
- * descriptor length is guaranteed to be non-zero by resource
- * validation above.
- */
- aml += acpi_ut_get_descriptor_length(aml);
- }
+ /* Validate the template and get a pointer to the end_tag */
- /* Did not find an end_tag resource descriptor */
+ status = acpi_ut_walk_aml_resources(obj_desc->buffer.pointer,
+ obj_desc->buffer.length, NULL,
+ (void **)end_tag);
- return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
+ return_ACPI_STATUS(status);
}
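
The new acpi_ut_walk_aml_resources centralizes the template walk that acpi_ut_get_resource_end_tag previously open-coded: validate each descriptor, compute its length, invoke an optional callback, and stop at the end tag. A compact sketch of the same walking pattern over a simplified, length-prefixed descriptor layout (not the real AML encoding) is:

/* Illustrative walk of a length-prefixed descriptor list until an end tag;
 * the field layout and names are simplified stand-ins, not the AML format. */
#include <stddef.h>
#include <stdint.h>

struct desc_header { uint8_t type; uint8_t length; };   /* simplified "small" form */

typedef int (*desc_cb)(const uint8_t *desc, size_t len, size_t off, void *ctx);

static int walk_descriptors(const uint8_t *buf, size_t buf_len,
			    uint8_t end_tag_type, desc_cb cb, void *ctx)
{
	size_t offset = 0;

	while (offset + sizeof(struct desc_header) <= buf_len) {
		const struct desc_header *h = (const void *)(buf + offset);
		size_t total = sizeof(*h) + h->length;

		if (offset + total > buf_len)
			return -1;               /* descriptor overruns the buffer */
		if (cb) {
			int rc = cb(buf + offset, total, offset, ctx);
			if (rc)
				return rc;
		}
		if (h->type == end_tag_type)
			return 0;                /* end tag terminates the template */
		offset += total;
	}
	return -1;                               /* no end tag found */
}
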
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
index 4b134a72290..0f5c5bb5def 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/utilities/utstate.c
@@ -96,7 +96,7 @@ void
acpi_ut_push_generic_state(union acpi_generic_state **list_head,
union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE("ut_push_generic_state");
+ ACPI_FUNCTION_TRACE(ut_push_generic_state);
/* Push the state object onto the front of the list (stack) */
@@ -123,12 +123,13 @@ union acpi_generic_state *acpi_ut_pop_generic_state(union acpi_generic_state
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE("ut_pop_generic_state");
+ ACPI_FUNCTION_TRACE(ut_pop_generic_state);
/* Remove the state object at the head of the list (stack) */
state = *list_head;
if (state) {
+
/* Update the list head */
*list_head = state->common.next;
@@ -158,9 +159,10 @@ union acpi_generic_state *acpi_ut_create_generic_state(void)
state = acpi_os_acquire_object(acpi_gbl_state_cache);
if (state) {
+
/* Initialize */
memset(state, 0, sizeof(union acpi_generic_state));
- state->common.data_type = ACPI_DESC_TYPE_STATE;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE;
}
return (state);
@@ -183,7 +185,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE("ut_create_thread_state");
+ ACPI_FUNCTION_TRACE(ut_create_thread_state);
/* Create the generic state object */
@@ -194,7 +196,7 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
/* Init fields specific to the update struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_THREAD;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_THREAD;
state->thread.thread_id = acpi_os_get_thread_id();
return_PTR((struct acpi_thread_state *)state);
@@ -220,7 +222,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR("ut_create_update_state", object);
+ ACPI_FUNCTION_TRACE_PTR(ut_create_update_state, object);
/* Create the generic state object */
@@ -231,7 +233,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
/* Init fields specific to the update struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_UPDATE;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_UPDATE;
state->update.object = object;
state->update.value = action;
@@ -257,7 +259,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE_PTR("ut_create_pkg_state", internal_object);
+ ACPI_FUNCTION_TRACE_PTR(ut_create_pkg_state, internal_object);
/* Create the generic state object */
@@ -268,7 +270,7 @@ union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
/* Init fields specific to the update struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_PACKAGE;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_PACKAGE;
state->pkg.source_object = (union acpi_operand_object *)internal_object;
state->pkg.dest_object = external_object;
state->pkg.index = index;
@@ -294,7 +296,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
{
union acpi_generic_state *state;
- ACPI_FUNCTION_TRACE("ut_create_control_state");
+ ACPI_FUNCTION_TRACE(ut_create_control_state);
/* Create the generic state object */
@@ -305,7 +307,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
/* Init fields specific to the control struct */
- state->common.data_type = ACPI_DESC_TYPE_STATE_CONTROL;
+ state->common.descriptor_type = ACPI_DESC_TYPE_STATE_CONTROL;
state->common.state = ACPI_CONTROL_CONDITIONAL_EXECUTING;
return_PTR(state);
@@ -319,15 +321,19 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
*
* RETURN: None
*
- * DESCRIPTION: Put a state object back into the global state cache. The object
- * is not actually freed at this time.
+ * DESCRIPTION: Release a state object to the state cache. NULL state objects
+ * are ignored.
*
******************************************************************************/
void acpi_ut_delete_generic_state(union acpi_generic_state *state)
{
- ACPI_FUNCTION_TRACE("ut_delete_generic_state");
+ ACPI_FUNCTION_TRACE(ut_delete_generic_state);
+
+ /* Ignore null state */
- (void)acpi_os_release_object(acpi_gbl_state_cache, state);
+ if (state) {
+ (void)acpi_os_release_object(acpi_gbl_state_cache, state);
+ }
return_VOID;
}
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index 308a960871b..3538f69c82a 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -41,8 +41,6 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
-#include <linux/module.h>
-
#include <acpi/acpi.h>
#include <acpi/acevents.h>
#include <acpi/acnamesp.h>
@@ -67,7 +65,7 @@ acpi_status acpi_initialize_subsystem(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_initialize_subsystem");
+ ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
@@ -109,6 +107,8 @@ acpi_status acpi_initialize_subsystem(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem)
+
/*******************************************************************************
*
* FUNCTION: acpi_enable_subsystem
@@ -121,12 +121,11 @@ acpi_status acpi_initialize_subsystem(void)
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
-
acpi_status acpi_enable_subsystem(u32 flags)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_enable_subsystem");
+ ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
/*
* We must initialize the hardware before we can enable ACPI.
@@ -152,7 +151,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
status = acpi_enable();
if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "acpi_enable failed"));
+ ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
return_ACPI_STATUS(status);
}
}
@@ -229,6 +228,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
+
/*******************************************************************************
*
* FUNCTION: acpi_initialize_objects
@@ -241,12 +242,11 @@ acpi_status acpi_enable_subsystem(u32 flags)
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
-
acpi_status acpi_initialize_objects(u32 flags)
{
acpi_status status = AE_OK;
- ACPI_FUNCTION_TRACE("acpi_initialize_objects");
+ ACPI_FUNCTION_TRACE(acpi_initialize_objects);
/*
* Run all _REG methods
@@ -257,7 +257,7 @@ acpi_status acpi_initialize_objects(u32 flags)
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Executing _REG op_region methods\n"));
+ "[Init] Executing _REG OpRegion methods\n"));
status = acpi_ev_initialize_op_regions();
if (ACPI_FAILURE(status)) {
@@ -305,6 +305,8 @@ acpi_status acpi_initialize_objects(u32 flags)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
+
/*******************************************************************************
*
* FUNCTION: acpi_terminate
@@ -316,12 +318,11 @@ acpi_status acpi_initialize_objects(u32 flags)
* DESCRIPTION: Shutdown the ACPI subsystem. Release all resources.
*
******************************************************************************/
-
acpi_status acpi_terminate(void)
{
acpi_status status;
- ACPI_FUNCTION_TRACE("acpi_terminate");
+ ACPI_FUNCTION_TRACE(acpi_terminate);
/* Terminate the AML Debugger if present */
@@ -348,6 +349,8 @@ acpi_status acpi_terminate(void)
return_ACPI_STATUS(status);
}
+ACPI_EXPORT_SYMBOL(acpi_terminate)
+
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
@@ -362,7 +365,6 @@ acpi_status acpi_terminate(void)
* initialized successfully.
*
******************************************************************************/
-
acpi_status acpi_subsystem_status(void)
{
@@ -373,6 +375,8 @@ acpi_status acpi_subsystem_status(void)
}
}
+ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
+
/*******************************************************************************
*
* FUNCTION: acpi_get_system_info
@@ -390,14 +394,13 @@ acpi_status acpi_subsystem_status(void)
* and the value of out_buffer is undefined.
*
******************************************************************************/
-
acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
{
struct acpi_system_info *info_ptr;
acpi_status status;
u32 i;
- ACPI_FUNCTION_TRACE("acpi_get_system_info");
+ ACPI_FUNCTION_TRACE(acpi_get_system_info);
/* Parameter validation */
@@ -448,15 +451,15 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
/* Current status of the ACPI tables, per table type */
- info_ptr->num_table_types = NUM_ACPI_TABLE_TYPES;
- for (i = 0; i < NUM_ACPI_TABLE_TYPES; i++) {
+ info_ptr->num_table_types = ACPI_TABLE_ID_MAX + 1;
+ for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count;
}
return_ACPI_STATUS(AE_OK);
}
-EXPORT_SYMBOL(acpi_get_system_info);
+ACPI_EXPORT_SYMBOL(acpi_get_system_info)
/*****************************************************************************
*
@@ -472,7 +475,6 @@ EXPORT_SYMBOL(acpi_get_system_info);
* TBD: When a second function is added, must save the Function also.
*
****************************************************************************/
-
acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
{
@@ -489,6 +491,7 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
return AE_OK;
}
+ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
/*****************************************************************************
@@ -502,10 +505,9 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
* DESCRIPTION: Empty all caches (delete the cached objects)
*
****************************************************************************/
-
acpi_status acpi_purge_cached_objects(void)
{
- ACPI_FUNCTION_TRACE("acpi_purge_cached_objects");
+ ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);
(void)acpi_os_purge_cache(acpi_gbl_state_cache);
(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
@@ -513,3 +515,5 @@ acpi_status acpi_purge_cached_objects(void)
(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
return_ACPI_STATUS(AE_OK);
}
+
+ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 6458c47f7ac..6b516852ac1 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -273,11 +273,13 @@ acpi_evaluate_integer(acpi_handle handle,
status = acpi_evaluate_object(handle, pathname, arguments, &buffer);
if (ACPI_FAILURE(status)) {
acpi_util_eval_error(handle, pathname, status);
+ kfree(element);
return_ACPI_STATUS(status);
}
if (element->type != ACPI_TYPE_INTEGER) {
acpi_util_eval_error(handle, pathname, AE_BAD_DATA);
+ kfree(element);
return_ACPI_STATUS(AE_BAD_DATA);
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index bd488751837..86531ab4ee5 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -323,7 +323,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
if (!ACPI_SUCCESS(status))
return_VALUE(status);
obj = (union acpi_object *)buffer.pointer;
- if (!obj && (obj->type != ACPI_TYPE_PACKAGE)) {
+ if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _BCL data\n"));
status = -EFAULT;
goto err;
@@ -1294,7 +1294,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
struct acpi_video_bus *video)
{
unsigned long device_id;
- int status, result;
+ int status;
struct acpi_video_device *data;
ACPI_FUNCTION_TRACE("acpi_video_bus_get_one_device");
@@ -1346,8 +1346,11 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Error installing notify handler\n"));
- result = -ENODEV;
- goto end;
+ if (data->brightness)
+ kfree(data->brightness->levels);
+ kfree(data->brightness);
+ kfree(data);
+ return -ENODEV;
}
down(&video->sem);
@@ -1359,7 +1362,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
return_VALUE(0);
}
- end:
return_VALUE(-ENOENT);
}
@@ -1643,8 +1645,9 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
printk(KERN_WARNING PREFIX
"hhuuhhuu bug in acpi video driver.\n");
+ if (data->brightness)
+ kfree(data->brightness->levels);
kfree(data->brightness);
-
kfree(data);
}
@@ -1785,6 +1788,10 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Error installing notify handler\n"));
+ acpi_video_bus_stop_devices(video);
+ acpi_video_bus_put_devices(video);
+ kfree(video->attached_array);
+ acpi_video_bus_remove_fs(device);
result = -ENODEV;
goto end;
}
@@ -1796,10 +1803,8 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->flags.post ? "yes" : "no");
end:
- if (result) {
- acpi_video_bus_remove_fs(device);
+ if (result)
kfree(video);
- }
return_VALUE(result);
}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ae0949b3394..93d94749310 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -402,6 +402,7 @@ config BLK_DEV_RAM_SIZE
config BLK_DEV_INITRD
bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
+ depends on BROKEN || !FRV
help
The initial RAM filesystem is a ramfs which is loaded by the
boot loader (loadlin or lilo) and that is mounted as root
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9c3b94e8f03..3c74ea729fc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -818,7 +818,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
- lo->transfer = NULL;
+ lo->transfer = transfer_none;
lo->ioctl = NULL;
lo->lo_sizelimit = 0;
lo->old_gfp_mask = mapping_gfp_mask(mapping);
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 8c4c6ef748e..907fb66ec4a 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -497,7 +497,7 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
info = buffer.pointer;
info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
- ACPI_MEM_FREE(info);
+ kfree(info);
if (match) {
status = hp_acpi_csr_space(handle, &sba_hpa, &length);
if (ACPI_SUCCESS(status))
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ef140ebde11..07473cd8412 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -925,11 +925,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
status = acpi_resource_to_address64(res, &addr);
if (ACPI_SUCCESS(status)) {
- unsigned long size;
-
- size = addr.maximum - addr.minimum + 1;
hdp->hd_phys_address = addr.minimum;
- hdp->hd_address = ioremap(addr.minimum, size);
+ hdp->hd_address = ioremap(addr.minimum, addr.address_length);
if (hpet_is_known(hdp)) {
printk(KERN_DEBUG "%s: 0x%lx is busy\n",
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 2d11ddd99e5..8f886717097 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -212,24 +212,16 @@ static int set_param_str(const char *val, struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv = 0;
- const char *end;
- char valcp[16];
- int len;
-
- /* Truncate leading and trailing spaces. */
- while (isspace(*val))
- val++;
- end = val + strlen(val) - 1;
- while ((end >= val) && isspace(*end))
- end--;
- len = end - val + 1;
- if (len > sizeof(valcp) - 1)
- return -EINVAL;
- memcpy(valcp, val, len);
- valcp[len] = '\0';
+ char *dup, *s;
+
+ dup = kstrdup(val, GFP_KERNEL);
+ if (!dup)
+ return -ENOMEM;
+
+ s = strstrip(dup);
down_read(&register_sem);
- rv = fn(valcp, NULL);
+ rv = fn(s, NULL);
if (rv)
goto out_unlock;
@@ -239,6 +231,7 @@ static int set_param_str(const char *val, struct kernel_param *kp)
out_unlock:
up_read(&register_sem);
+ kfree(dup);
return rv;
}
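
The set_param_str rework above swaps the hand-rolled trimming for kstrdup() plus strstrip(), which also drops the fixed 16-byte limit on parameter values. A userspace model of the same pattern (the trim helper below only approximates strstrip(), and none of these names are the kernel APIs) looks like:

/* Userspace sketch of the "duplicate, strip, pass through, free" pattern. */
#include <ctype.h>
#include <stdlib.h>
#include <string.h>

static char *trim(char *s)
{
	char *end;

	while (isspace((unsigned char)*s))   /* drop leading white space */
		s++;
	end = s + strlen(s);
	while (end > s && isspace((unsigned char)end[-1]))
		*--end = '\0';               /* drop trailing white space */
	return s;
}

static int handle_param(const char *val, int (*fn)(const char *))
{
	char *dup = strdup(val);             /* work on a private copy */
	int rv;

	if (!dup)
		return -1;
	rv = fn(trim(dup));                  /* hand the trimmed value to the action */
	free(dup);                           /* the copy is freed on every path */
	return rv;
}
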
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 07213454c45..0c141c295fb 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -844,7 +844,7 @@ static int bh_action(MGSLPC_INFO *info)
return rc;
}
-void bh_handler(void* Context)
+static void bh_handler(void* Context)
{
MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
int action;
@@ -888,7 +888,7 @@ void bh_handler(void* Context)
__FILE__,__LINE__,info->device_name);
}
-void bh_transmit(MGSLPC_INFO *info)
+static void bh_transmit(MGSLPC_INFO *info)
{
struct tty_struct *tty = info->tty;
if (debug_level >= DEBUG_LEVEL_BH)
@@ -900,7 +900,7 @@ void bh_transmit(MGSLPC_INFO *info)
}
}
-void bh_status(MGSLPC_INFO *info)
+static void bh_status(MGSLPC_INFO *info)
{
info->ri_chkcount = 0;
info->dsr_chkcount = 0;
@@ -2305,7 +2305,7 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file,
return ioctl_common(info, cmd, arg);
}
-int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg)
+static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg)
{
int error;
struct mgsl_icount cnow; /* kernel counter temps */
@@ -2877,7 +2877,7 @@ done:
return ((count < begin+len-off) ? count : begin+len-off);
}
-int rx_alloc_buffers(MGSLPC_INFO *info)
+static int rx_alloc_buffers(MGSLPC_INFO *info)
{
/* each buffer has header and data */
info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size;
@@ -2900,13 +2900,13 @@ int rx_alloc_buffers(MGSLPC_INFO *info)
return 0;
}
-void rx_free_buffers(MGSLPC_INFO *info)
+static void rx_free_buffers(MGSLPC_INFO *info)
{
kfree(info->rx_buf);
info->rx_buf = NULL;
}
-int claim_resources(MGSLPC_INFO *info)
+static int claim_resources(MGSLPC_INFO *info)
{
if (rx_alloc_buffers(info) < 0 ) {
printk( "Cant allocate rx buffer %s\n", info->device_name);
@@ -2916,7 +2916,7 @@ int claim_resources(MGSLPC_INFO *info)
return 0;
}
-void release_resources(MGSLPC_INFO *info)
+static void release_resources(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_INFO)
printk("release_resources(%s)\n", info->device_name);
@@ -2928,7 +2928,7 @@ void release_resources(MGSLPC_INFO *info)
*
* Arguments: info pointer to device instance data
*/
-void mgslpc_add_device(MGSLPC_INFO *info)
+static void mgslpc_add_device(MGSLPC_INFO *info)
{
info->next_device = NULL;
info->line = mgslpc_device_count;
@@ -2964,7 +2964,7 @@ void mgslpc_add_device(MGSLPC_INFO *info)
#endif
}
-void mgslpc_remove_device(MGSLPC_INFO *remove_info)
+static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
{
MGSLPC_INFO *info = mgslpc_device_list;
MGSLPC_INFO *last = NULL;
@@ -3257,7 +3257,7 @@ static void loopback_enable(MGSLPC_INFO *info)
write_reg(info, CHA + MODE, val);
}
-void hdlc_mode(MGSLPC_INFO *info)
+static void hdlc_mode(MGSLPC_INFO *info)
{
unsigned char val;
unsigned char clkmode, clksubmode;
@@ -3497,7 +3497,7 @@ void hdlc_mode(MGSLPC_INFO *info)
rx_stop(info);
}
-void rx_stop(MGSLPC_INFO *info)
+static void rx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_stop(%s)\n",
@@ -3510,7 +3510,7 @@ void rx_stop(MGSLPC_INFO *info)
info->rx_overflow = 0;
}
-void rx_start(MGSLPC_INFO *info)
+static void rx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):rx_start(%s)\n",
@@ -3526,7 +3526,7 @@ void rx_start(MGSLPC_INFO *info)
info->rx_enabled = 1;
}
-void tx_start(MGSLPC_INFO *info)
+static void tx_start(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_start(%s)\n",
@@ -3564,7 +3564,7 @@ void tx_start(MGSLPC_INFO *info)
info->tx_enabled = 1;
}
-void tx_stop(MGSLPC_INFO *info)
+static void tx_stop(MGSLPC_INFO *info)
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):tx_stop(%s)\n",
@@ -3578,7 +3578,7 @@ void tx_stop(MGSLPC_INFO *info)
/* Reset the adapter to a known state and prepare it for further use.
*/
-void reset_device(MGSLPC_INFO *info)
+static void reset_device(MGSLPC_INFO *info)
{
/* power up both channels (set BIT7) */
write_reg(info, CHA + CCR0, 0x80);
@@ -3628,7 +3628,7 @@ void reset_device(MGSLPC_INFO *info)
write_reg(info, IPC, 0x05);
}
-void async_mode(MGSLPC_INFO *info)
+static void async_mode(MGSLPC_INFO *info)
{
unsigned char val;
@@ -3799,7 +3799,7 @@ void async_mode(MGSLPC_INFO *info)
/* Set the HDLC idle mode for the transmitter.
*/
-void tx_set_idle(MGSLPC_INFO *info)
+static void tx_set_idle(MGSLPC_INFO *info)
{
/* Note: ESCC2 only supports flags and one idle modes */
if (info->idle_mode == HDLC_TXIDLE_FLAGS)
@@ -3810,7 +3810,7 @@ void tx_set_idle(MGSLPC_INFO *info)
/* get state of the V24 status (input) signals.
*/
-void get_signals(MGSLPC_INFO *info)
+static void get_signals(MGSLPC_INFO *info)
{
unsigned char status = 0;
@@ -3832,7 +3832,7 @@ void get_signals(MGSLPC_INFO *info)
/* Set the state of DTR and RTS based on contents of
* serial_signals member of device extension.
*/
-void set_signals(MGSLPC_INFO *info)
+static void set_signals(MGSLPC_INFO *info)
{
unsigned char val;
@@ -3856,7 +3856,7 @@ void set_signals(MGSLPC_INFO *info)
set_reg_bits(info, CHA + PVR, PVR_DTR);
}
-void rx_reset_buffers(MGSLPC_INFO *info)
+static void rx_reset_buffers(MGSLPC_INFO *info)
{
RXBUF *buf;
int i;
@@ -3875,7 +3875,7 @@ void rx_reset_buffers(MGSLPC_INFO *info)
*
* Returns 1 if frame returned, otherwise 0
*/
-int rx_get_frame(MGSLPC_INFO *info)
+static int rx_get_frame(MGSLPC_INFO *info)
{
unsigned short status;
RXBUF *buf;
@@ -3961,7 +3961,7 @@ int rx_get_frame(MGSLPC_INFO *info)
return 1;
}
-BOOLEAN register_test(MGSLPC_INFO *info)
+static BOOLEAN register_test(MGSLPC_INFO *info)
{
static unsigned char patterns[] =
{ 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
@@ -3987,7 +3987,7 @@ BOOLEAN register_test(MGSLPC_INFO *info)
return rc;
}
-BOOLEAN irq_test(MGSLPC_INFO *info)
+static BOOLEAN irq_test(MGSLPC_INFO *info)
{
unsigned long end_time;
unsigned long flags;
@@ -4022,7 +4022,7 @@ BOOLEAN irq_test(MGSLPC_INFO *info)
return info->irq_occurred ? TRUE : FALSE;
}
-int adapter_test(MGSLPC_INFO *info)
+static int adapter_test(MGSLPC_INFO *info)
{
if (!register_test(info)) {
info->init_error = DiagStatus_AddressFailure;
@@ -4044,7 +4044,7 @@ int adapter_test(MGSLPC_INFO *info)
return 0;
}
-void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
+static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
{
int i;
int linecount;
@@ -4079,7 +4079,7 @@ void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
/* HDLC frame time out
* update stats and do tx completion processing
*/
-void tx_timeout(unsigned long context)
+static void tx_timeout(unsigned long context)
{
MGSLPC_INFO *info = (MGSLPC_INFO*)context;
unsigned long flags;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index a90f5d97df3..43dfd8689dc 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -512,7 +512,7 @@ static struct sonypi_device {
#ifdef CONFIG_ACPI
static struct acpi_device *sonypi_acpi_device;
-static int acpi_enabled;
+static int acpi_driver_registered;
#endif
static int sonypi_ec_write(u8 addr, u8 value)
@@ -869,7 +869,7 @@ found:
sonypi_report_input_event(event);
#ifdef CONFIG_ACPI
- if (acpi_enabled)
+ if (sonypi_acpi_device)
acpi_bus_generate_event(sonypi_acpi_device, 1, event);
#endif
@@ -1551,8 +1551,8 @@ static int __init sonypi_init(void)
goto err_free_device;
#ifdef CONFIG_ACPI
- if (acpi_bus_register_driver(&sonypi_acpi_driver) > 0)
- acpi_enabled = 1;
+ if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
+ acpi_driver_registered = 1;
#endif
return 0;
@@ -1567,7 +1567,7 @@ static int __init sonypi_init(void)
static void __exit sonypi_exit(void)
{
#ifdef CONFIG_ACPI
- if (acpi_enabled)
+ if (acpi_driver_registered)
acpi_bus_unregister_driver(&sonypi_acpi_driver);
#endif
platform_device_unregister(sonypi_platform_device);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 4b4d7db1ff7..498aa37bca2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
+#include <linux/connector.h>
#include <asm/atomic.h>
#include <linux/cn_proc.h>
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 79d581c8652..b49bacfd8de 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -121,6 +121,7 @@ nlmsg_failure:
kfree_skb(skb);
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(cn_netlink_send);
/*
* Callback helper - queues work and setup destructor for given data.
@@ -319,6 +320,7 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
return 0;
}
+EXPORT_SYMBOL_GPL(cn_add_callback);
/*
* Callback remove routing - removes callback
@@ -335,6 +337,7 @@ void cn_del_callback(struct cb_id *id)
cn_queue_del_callback(dev->cbdev, id);
cn_notify(id, 1);
}
+EXPORT_SYMBOL_GPL(cn_del_callback);
/*
* Checks two connector's control messages to be the same.
@@ -488,7 +491,3 @@ static void __devexit cn_fini(void)
subsys_initcall(cn_init);
module_exit(cn_fini);
-
-EXPORT_SYMBOL_GPL(cn_add_callback);
-EXPORT_SYMBOL_GPL(cn_del_callback);
-EXPORT_SYMBOL_GPL(cn_netlink_send);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index b4a41d6d071..6de3cd3d6e8 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1451,9 +1451,12 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
} else {
confused:
printk (KERN_ERR "%s: cdrom_pc_intr: The drive "
- "appears confused (ireason = 0x%02x)\n",
+ "appears confused (ireason = 0x%02x). "
+ "Trying to recover by ending request.\n",
drive->name, ireason);
rq->flags |= REQ_FAILED;
+ cdrom_end_request(drive, 0);
+ return ide_stopped;
}
/* Now we wait for another interrupt. */
@@ -1722,8 +1725,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
}
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
return ide_started;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index c481be8b807..783a2475ee8 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -206,8 +206,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
ide_hwif_t *hwif = HWIF(drive);
struct scatterlist *sg = hwif->sg_table;
- if ((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256)
- BUG();
+ BUG_ON((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256);
ide_map_sg(drive, rq);
@@ -947,8 +946,7 @@ void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_p
}
printk("\n");
- if (!(hwif->dma_master))
- BUG();
+ BUG_ON(!hwif->dma_master);
}
EXPORT_SYMBOL_GPL(ide_setup_dma);
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index a53e3ce4a14..a1179e92496 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -898,8 +898,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
"to send us more data than expected "
"- discarding data\n");
idefloppy_discard_data(drive,bcount.all);
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive,
&idefloppy_pc_intr,
IDEFLOPPY_WAIT_CMD,
@@ -932,8 +931,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
pc->actually_transferred += bcount.all;
pc->current_position += bcount.all;
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); /* And set the interrupt handler again */
return ide_started;
}
@@ -960,8 +958,7 @@ static ide_startstop_t idefloppy_transfer_pc (ide_drive_t *drive)
"issuing a packet command\n");
return ide_do_reset(drive);
}
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
/* Set the interrupt routine */
ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);
/* Send the actual packet */
@@ -1017,8 +1014,7 @@ static ide_startstop_t idefloppy_transfer_pc1 (ide_drive_t *drive)
* 40 and 50msec work well. idefloppy_pc_intr will not be actually
* used until after the packet is moved in about 50 msec.
*/
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive,
&idefloppy_pc_intr, /* service routine for packet command */
floppy->ticks, /* wait this long before "failing" */
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b72dde70840..97a49e77a8f 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -939,8 +939,7 @@ void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *ha
spin_lock_irqsave(&ide_lock, flags);
- if(hwgroup->handler)
- BUG();
+ BUG_ON(hwgroup->handler);
hwgroup->handler = handler;
hwgroup->expiry = expiry;
hwgroup->timer.expires = jiffies + timeout;
@@ -981,8 +980,7 @@ static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
printk("%s: ATAPI reset complete\n", drive->name);
} else {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -1021,8 +1019,7 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -1138,8 +1135,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
hwgroup = HWGROUP(drive);
/* We must not reset with running handlers */
- if(hwgroup->handler != NULL)
- BUG();
+ BUG_ON(hwgroup->handler != NULL);
/* For an ATAPI device, first try an ATAPI SRST. */
if (drive->media != ide_disk && !do_not_try_atapi) {
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 41d46dbe6c2..16a143133f9 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -164,8 +164,7 @@ u8 ide_rate_filter (u8 mode, u8 speed)
// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed);
/* So that we remember to update this if new modes appear */
- if (mode > 4)
- BUG();
+ BUG_ON(mode > 4);
return min(speed, speed_max[mode]);
#else /* !CONFIG_BLK_DEV_IDEDMA */
return min(speed, (u8)XFER_PIO_4);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 9233b8109a0..a839b2a8f6f 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -196,8 +196,7 @@ ide_startstop_t set_geometry_intr (ide_drive_t *drive)
if (stat & (ERR_STAT|DRQ_STAT))
return ide_error(drive, "set_geometry_intr", stat);
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
return ide_started;
}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 3fdab563fec..9799aed772e 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -726,6 +726,7 @@ void ide_setup_ports ( hw_regs_t *hw,
{
int i;
+ memset(hw, 0, sizeof(hw_regs_t));
for (i = 0; i < IDE_NR_PORTS; i++) {
if (offsets[i] == -1) {
switch(i) {
@@ -1366,8 +1367,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
ide_abort(drive, "drive reset");
- if(HWGROUP(drive)->handler)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler);
/* Ensure nothing gets queued after we
drop the lock. Reset will clear the busy */
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2a78b792f7f..434a94faa3b 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -80,6 +80,7 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
{
int i;
+ memset(hw, 0, sizeof(hw_regs_t));
for (i = 0; i < IDE_NR_PORTS; i++) {
/* BIG FAT WARNING:
assumption: only DATA port is ever used in 16 bit mode */
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 27c9eb989a9..e125032bb40 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -723,6 +723,12 @@ static ide_pci_device_t sgiioc4_chipsets[] __devinitdata = {
int
ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
+ /* PCI-RT does not bring out IDE connection.
+ * Do not attach to this particular IOC4.
+ */
+ if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
+ return 0;
+
return pci_init_sgiioc4(idd->idd_pdev,
&sgiioc4_chipsets[idd->idd_pci_id->driver_data]);
}
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index c26c8ca90dd..fe80295974e 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -183,8 +183,7 @@ static void trm290_ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
ide_hwif_t *hwif = HWIF(drive);
- if (HWGROUP(drive)->handler != NULL) /* paranoia check */
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL); /* paranoia check */
ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL);
/* issue cmd to drive */
hwif->OUTB(command, IDE_COMMAND_REG);
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 186737539cf..79b81be6797 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
config IEEE1394
tristate "IEEE 1394 (FireWire) support"
- depends on PCI || BROKEN
+ depends on (PCI || BROKEN) && (BROKEN || !FRV)
select NET
help
IEEE 1394 describes a high performance serial bus, which is also
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5ec2d49e9bb..e57d3c50f75 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -821,11 +821,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
}
-static struct super_block *uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
- INFINIBANDEVENTFS_MAGIC);
+ INFINIBANDEVENTFS_MAGIC, mnt);
}
static struct file_system_type uverbs_event_fs = {
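The uverbs hunk above is one instance of a tree-wide interface change that recurs
throughout this patch: file_system_type.get_sb() no longer returns a
struct super_block * but an int, and receives the pre-allocated struct vfsmount *
to instantiate. A hedged sketch of the two shapes (foo_* names are illustrative):

	/* old-style callback: returns the superblock (or an ERR_PTR) */
	static struct super_block *foo_get_sb(struct file_system_type *fs_type,
			int flags, const char *dev_name, void *data)
	{
		return get_sb_single(fs_type, flags, data, foo_fill_super);
	}

	/* new-style callback: returns 0 or -errno, fills in *mnt */
	static int foo_get_sb(struct file_system_type *fs_type,
			int flags, const char *dev_name, void *data,
			struct vfsmount *mnt)
	{
		return get_sb_single(fs_type, flags, data, foo_fill_super, mnt);
	}

Helpers such as get_sb_single(), get_sb_bdev() and get_sb_pseudo() gain the
trailing mnt argument and now return an int, which is why most conversions in
this patch are one-line changes; filesystems that call sget() directly (see the
9p hunk later on) instead call simple_set_mnt(mnt, sb) and return an error code.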
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index e274120567e..63de3046aff 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -542,13 +542,14 @@ bail:
return ret;
}
-static struct super_block *ipathfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int ipathfs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt)
{
- ipath_super = get_sb_single(fs_type, flags, data,
- ipathfs_fill_super);
- return ipath_super;
+ int ret = get_sb_single(fs_type, flags, data,
+ ipathfs_fill_super, mnt);
+ if (ret >= 0)
+ ipath_super = mnt->mnt_sb;
+ return ret;
}
static void ipathfs_kill_super(struct super_block *s)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index ba325f16d07..5f561fce32d 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -82,7 +82,7 @@ static int evdev_fasync(int fd, struct file *file, int on)
return retval < 0 ? retval : 0;
}
-static int evdev_flush(struct file * file)
+static int evdev_flush(struct file * file, fl_owner_t id)
{
struct evdev_list *list = file->private_data;
if (!list->evdev->exist) return -ENODEV;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 4c8fb1f8631..f1f9db9d282 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -36,6 +36,7 @@
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/keyboard.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
@@ -45,7 +46,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
-static unsigned char amikbd_keycode[0x78] = {
+static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
[2] = KEY_2,
@@ -170,12 +171,9 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy, struct pt_regs *fp)
scancode >>= 1;
if (scancode < 0x78) { /* scancodes < 0x78 are keys */
-
- scancode = amikbd_keycode[scancode];
-
input_regs(amikbd_dev, fp);
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
+ if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
input_report_key(amikbd_dev, scancode, 1);
input_report_key(amikbd_dev, scancode, 0);
} else {
@@ -191,7 +189,7 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy, struct pt_regs *fp)
static int __init amikbd_init(void)
{
- int i;
+ int i, j;
if (!AMIGAHW_PRESENT(AMI_KEYBOARD))
return -EIO;
@@ -214,14 +212,26 @@ static int __init amikbd_init(void)
amikbd_dev->id.version = 0x0100;
amikbd_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
- amikbd_dev->keycode = amikbd_keycode;
- amikbd_dev->keycodesize = sizeof(unsigned char);
- amikbd_dev->keycodemax = ARRAY_SIZE(amikbd_keycode);
for (i = 0; i < 0x78; i++)
- if (amikbd_keycode[i])
- set_bit(amikbd_keycode[i], amikbd_dev->keybit);
-
+ set_bit(i, amikbd_dev->keybit);
+
+ for (i = 0; i < MAX_NR_KEYMAPS; i++) {
+ static u_short temp_map[NR_KEYS] __initdata;
+ if (!key_maps[i])
+ continue;
+ memset(temp_map, 0, sizeof(temp_map));
+ for (j = 0; j < 0x78; j++) {
+ if (!amikbd_keycode[j])
+ continue;
+ temp_map[j] = key_maps[i][amikbd_keycode[j]];
+ }
+ for (j = 0; j < NR_KEYS; j++) {
+ if (!temp_map[j])
+ temp_map[j] = 0xf200;
+ }
+ memcpy(key_maps[i], temp_map, sizeof(temp_map));
+ }
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", amikbd_interrupt);
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 0a37aded4b5..9ea6bd0ddc3 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -121,10 +121,10 @@ fail:
return -ENOMEM;
}
-static struct super_block *capifs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int capifs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, capifs_fill_super);
+ return get_sb_single(fs_type, flags, data, capifs_fill_super, mnt);
}
static struct file_system_type capifs_fs_type = {
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 22759c01746..81accdf3516 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1177,9 +1177,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t * off
goto out;
}
chidx = isdn_minor2chan(minor);
- while (isdn_writebuf_stub(drvidx, chidx, buf, count) != count)
+ while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0)
interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
- retval = count;
goto out;
}
if (minor <= ISDN_MINOR_CTRLMAX) {
@@ -1951,9 +1950,10 @@ isdn_writebuf_stub(int drvidx, int chan, const u_char __user * buf, int len)
struct sk_buff *skb = alloc_skb(hl + len, GFP_ATOMIC);
if (!skb)
- return 0;
+ return -ENOMEM;
skb_reserve(skb, hl);
- copy_from_user(skb_put(skb, len), buf, len);
+ if (copy_from_user(skb_put(skb, len), buf, len))
+ return -EFAULT;
ret = dev->drv[drvidx]->interface->writebuf_skb(drvidx, chan, 1, skb);
if (ret <= 0)
dev_kfree_skb(skb);
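The isdn_common.c hunk above converts isdn_writebuf_stub() from returning 0 on
failure to returning -ENOMEM or -EFAULT, and makes isdn_write() propagate that
result instead of unconditionally reporting success. For reference, a generic
sketch of the allocate/copy/propagate idiom (illustrative code, not this driver's
exact lines; a fully defensive version also frees the partially built skb when
the copy faults):

	struct sk_buff *skb = alloc_skb(hl + len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, hl);
	if (copy_from_user(skb_put(skb, len), buf, len)) {
		dev_kfree_skb(skb);	/* release the half-built buffer on fault */
		return -EFAULT;
	}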
diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c
index f4f71226a07..57c4ab96d13 100644
--- a/drivers/isdn/sc/ioctl.c
+++ b/drivers/isdn/sc/ioctl.c
@@ -97,6 +97,7 @@ int sc_ioctl(int card, scs_ioctl *data)
case SCIOCSTART:
{
+ kfree(rcvmsg);
pr_debug("%s: SCIOSTART: ioctl received\n",
sc_adapter[card]->devicename);
if(sc_adapter[card]->EngineUp) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 626506234b7..f573d5af0b1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -63,6 +63,12 @@ config LEDS_S3C24XX
This option enables support for LEDs connected to GPIO lines
on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
+config LEDS_AMS_DELTA
+ tristate "LED Support for the Amstrad Delta (E3)"
+	depends on LEDS_CLASS && MACH_AMS_DELTA
+ help
+ This option enables support for the LEDs on Amstrad Delta (E3).
+
comment "LED Triggers"
config LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 40f042633bf..dcea1001faa 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
+obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
new file mode 100644
index 00000000000..e9f06116c4d
--- /dev/null
+++ b/drivers/leds/leds-ams-delta.c
@@ -0,0 +1,162 @@
+/*
+ * LEDs driver for Amstrad Delta (E3)
+ *
+ * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/arch/board-ams-delta.h>
+
+/*
+ * Our context
+ */
+struct ams_delta_led {
+ struct led_classdev cdev;
+ u8 bitmask;
+};
+
+static void ams_delta_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ams_delta_led *led_dev =
+ container_of(led_cdev, struct ams_delta_led, cdev);
+
+ if (value)
+ ams_delta_latch1_write(led_dev->bitmask, led_dev->bitmask);
+ else
+ ams_delta_latch1_write(led_dev->bitmask, 0);
+}
+
+static struct ams_delta_led ams_delta_leds[] = {
+ {
+ .cdev = {
+ .name = "ams-delta:camera",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_CAMERA,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:advert",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_ADVERT,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:email",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_EMAIL,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:handsfree",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_HANDSFREE,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:voicemail",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_VOICEMAIL,
+ },
+ {
+ .cdev = {
+ .name = "ams-delta:voice",
+ .brightness_set = ams_delta_led_set,
+ },
+ .bitmask = AMS_DELTA_LATCH1_LED_VOICE,
+ },
+};
+
+#ifdef CONFIG_PM
+static int ams_delta_led_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
+ led_classdev_suspend(&ams_delta_leds[i].cdev);
+
+ return 0;
+}
+
+static int ams_delta_led_resume(struct platform_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++)
+ led_classdev_resume(&ams_delta_leds[i].cdev);
+
+ return 0;
+}
+#else
+#define ams_delta_led_suspend NULL
+#define ams_delta_led_resume NULL
+#endif
+
+static int ams_delta_led_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+
+ for (i = ret = 0; ret >= 0 && i < ARRAY_SIZE(ams_delta_leds); i++) {
+ ret = led_classdev_register(&pdev->dev,
+ &ams_delta_leds[i].cdev);
+ }
+
+ if (ret < 0 && i > 1) {
+ for (i = i - 2; i >= 0; i--)
+ led_classdev_unregister(&ams_delta_leds[i].cdev);
+ }
+
+ return ret;
+}
+
+static int ams_delta_led_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ams_delta_leds) - 1; i >= 0; i--)
+ led_classdev_unregister(&ams_delta_leds[i].cdev);
+
+ return 0;
+}
+
+static struct platform_driver ams_delta_led_driver = {
+ .probe = ams_delta_led_probe,
+ .remove = ams_delta_led_remove,
+ .suspend = ams_delta_led_suspend,
+ .resume = ams_delta_led_resume,
+ .driver = {
+ .name = "ams-delta-led",
+ },
+};
+
+static int __init ams_delta_led_init(void)
+{
+ return platform_driver_register(&ams_delta_led_driver);
+}
+
+static void __exit ams_delta_led_exit(void)
+{
+ return platform_driver_unregister(&ams_delta_led_driver);
+}
+
+module_init(ams_delta_led_init);
+module_exit(ams_delta_led_exit);
+
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Amstrad Delta LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 12ad462737b..ccf5df44cde 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -171,6 +171,7 @@ config THERM_PM72
config WINDFARM
tristate "New PowerMac thermal control infrastructure"
+ depends on PPC
config WINDFARM_PM81
tristate "Support for thermal management on iMac G5"
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 51c63c0cf1c..92657615657 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -139,15 +139,14 @@ int __init raid6_select_algo(void)
}
}
- if ( best )
+ if (best) {
printk("raid6: using algorithm %s (%ld MB/s)\n",
best->name,
(bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
- else
+ raid6_call = *best;
+ } else
printk("raid6: Yikes! No algorithm found!\n");
- raid6_call = *best;
-
free_pages((unsigned long)syndromes, 1);
return best ? 0 : -EINVAL;
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 26a230b6ff8..4a35caff5d0 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -90,10 +90,11 @@ static void ibmasmfs_create_files (struct super_block *sb, struct dentry *root);
static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent);
-static struct super_block *ibmasmfs_get_super(struct file_system_type *fst,
- int flags, const char *name, void *data)
+static int ibmasmfs_get_super(struct file_system_type *fst,
+ int flags, const char *name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_single(fst, flags, data, ibmasmfs_fill_super);
+ return get_sb_single(fst, flags, data, ibmasmfs_fill_super, mnt);
}
static struct super_operations ibmasmfs_s_ops = {
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b62da9b0cbf..71c2da277d6 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -272,10 +272,10 @@ static int oprofilefs_fill_super(struct super_block * sb, void * data, int silen
}
-static struct super_block *oprofilefs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int oprofilefs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, oprofilefs_fill_super);
+ return get_sb_single(fs_type, flags, data, oprofilefs_fill_super, mnt);
}
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 6c8452ede0e..4d8dc27ea9d 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -85,11 +85,6 @@ config PARPORT_PC_PCMCIA
config PARPORT_NOT_PC
bool
-config PARPORT_ARC
- tristate "Archimedes hardware"
- depends on ARM && PARPORT
- select PARPORT_NOT_PC
-
config PARPORT_IP32
tristate "SGI IP32 builtin port (EXPERIMENTAL)"
depends on SGI_IP32 && PARPORT && EXPERIMENTAL
diff --git a/drivers/parport/parport_arc.c b/drivers/parport/parport_arc.c
deleted file mode 100644
index b35bb4f48d6..00000000000
--- a/drivers/parport/parport_arc.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Low-level parallel port routines for Archimedes onboard hardware
- *
- * Author: Phil Blundell <philb@gnu.org>
- */
-
-/* This driver is for the parallel port hardware found on Acorn's old
- * range of Archimedes machines. The A5000 and newer systems have PC-style
- * I/O hardware and should use the parport_pc driver instead.
- *
- * The Acorn printer port hardware is very simple. There is a single 8-bit
- * write-only latch for the data port and control/status bits are handled
- * with various auxilliary input and output lines. The port is not
- * bidirectional, does not support any modes other than SPP, and has only
- * a subset of the standard printer control lines connected.
- */
-
-#include <linux/threads.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/parport.h>
-
-#include <asm/ptrace.h>
-#include <asm/io.h>
-#include <asm/arch/oldlatches.h>
-#include <asm/arch/irqs.h>
-
-#define DATA_ADDRESS 0x3350010
-
-/* This is equivalent to the above and only used for request_region. */
-#define PORT_BASE 0x80000000 | ((DATA_ADDRESS - IO_BASE) >> 2)
-
-/* The hardware can't read from the data latch, so we must use a soft
- copy. */
-static unsigned char data_copy;
-
-/* These are pretty simple. We know the irq is never shared and the
- kernel does all the magic that's required. */
-static void arc_enable_irq(struct parport *p)
-{
- enable_irq(p->irq);
-}
-
-static void arc_disable_irq(struct parport *p)
-{
- disable_irq(p->irq);
-}
-
-static void arc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- parport_generic_irq(irq, (struct parport *) dev_id, regs);
-}
-
-static void arc_write_data(struct parport *p, unsigned char data)
-{
- data_copy = data;
- outb_t(data, DATA_LATCH);
-}
-
-static unsigned char arc_read_data(struct parport *p)
-{
- return data_copy;
-}
-
-static struct parport_operations parport_arc_ops =
-{
- .write_data = arc_write_data,
- .read_data = arc_read_data,
-
- .write_control = arc_write_control,
- .read_control = arc_read_control,
- .frob_control = arc_frob_control,
-
- .read_status = arc_read_status,
-
- .enable_irq = arc_enable_irq,
- .disable_irq = arc_disable_irq,
-
- .data_forward = arc_data_forward,
- .data_reverse = arc_data_reverse,
-
- .init_state = arc_init_state,
- .save_state = arc_save_state,
- .restore_state = arc_restore_state,
-
- .epp_write_data = parport_ieee1284_epp_write_data,
- .epp_read_data = parport_ieee1284_epp_read_data,
- .epp_write_addr = parport_ieee1284_epp_write_addr,
- .epp_read_addr = parport_ieee1284_epp_read_addr,
-
- .ecp_write_data = parport_ieee1284_ecp_write_data,
- .ecp_read_data = parport_ieee1284_ecp_read_data,
- .ecp_write_addr = parport_ieee1284_ecp_write_addr,
-
- .compat_write_data = parport_ieee1284_write_compat,
- .nibble_read_data = parport_ieee1284_read_nibble,
- .byte_read_data = parport_ieee1284_read_byte,
-
- .owner = THIS_MODULE,
-};
-
-/* --- Initialisation code -------------------------------- */
-
-static int parport_arc_init(void)
-{
- /* Archimedes hardware provides only one port, at a fixed address */
- struct parport *p;
- struct resource res;
- char *fake_name = "parport probe");
-
- res = request_region(PORT_BASE, 1, fake_name);
- if (res == NULL)
- return 0;
-
- p = parport_register_port (PORT_BASE, IRQ_PRINTERACK,
- PARPORT_DMA_NONE, &parport_arc_ops);
-
- if (!p) {
- release_region(PORT_BASE, 1);
- return 0;
- }
-
- p->modes = PARPORT_MODE_ARCSPP;
- p->size = 1;
- rename_region(res, p->name);
-
- printk(KERN_INFO "%s: Archimedes on-board port, using irq %d\n",
- p->irq);
-
- /* Tell the high-level drivers about the port. */
- parport_announce_port (p);
-
- return 1;
-}
-
-module_init(parport_arc_init)
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 407b4eaddcb..3a4a644c268 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -36,13 +36,13 @@ static int irq_flags(int triggering, int polarity)
{
int flag;
if (triggering == ACPI_LEVEL_SENSITIVE) {
- if(polarity == ACPI_ACTIVE_LOW)
+ if (polarity == ACPI_ACTIVE_LOW)
flag = IORESOURCE_IRQ_LOWLEVEL;
else
flag = IORESOURCE_IRQ_HIGHLEVEL;
}
else {
- if(polarity == ACPI_ACTIVE_LOW)
+ if (polarity == ACPI_ACTIVE_LOW)
flag = IORESOURCE_IRQ_LOWEDGE;
else
flag = IORESOURCE_IRQ_HIGHEDGE;
@@ -57,7 +57,7 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_LOW;
break;
- case IORESOURCE_IRQ_HIGHLEVEL:
+ case IORESOURCE_IRQ_HIGHLEVEL:
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
@@ -73,7 +73,7 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
}
static void
-pnpacpi_parse_allocated_irqresource(struct pnp_resource_table * res, u32 gsi,
+pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res, u32 gsi,
int triggering, int polarity)
{
int i = 0;
@@ -101,7 +101,7 @@ pnpacpi_parse_allocated_irqresource(struct pnp_resource_table * res, u32 gsi,
}
static void
-pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table * res, u32 dma)
+pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res, u32 dma)
{
int i = 0;
while (i < PNP_MAX_DMA &&
@@ -119,8 +119,8 @@ pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table * res, u32 dma)
}
static void
-pnpacpi_parse_allocated_ioresource(struct pnp_resource_table * res,
- u32 io, u32 len)
+pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
+ u64 io, u64 len)
{
int i = 0;
while (!(res->port_resource[i].flags & IORESOURCE_UNSET) &&
@@ -138,7 +138,7 @@ pnpacpi_parse_allocated_ioresource(struct pnp_resource_table * res,
}
static void
-pnpacpi_parse_allocated_memresource(struct pnp_resource_table * res,
+pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
u64 mem, u64 len)
{
int i = 0;
@@ -156,11 +156,32 @@ pnpacpi_parse_allocated_memresource(struct pnp_resource_table * res,
}
}
+static void
+pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
+ struct acpi_resource *res)
+{
+ struct acpi_resource_address64 addr, *p = &addr;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(res, p);
+ if (!ACPI_SUCCESS(status)) {
+ pnp_warn("PnPACPI: failed to convert resource type %d",
+ res->type);
+ return;
+ }
+
+ if (p->resource_type == ACPI_MEMORY_RANGE)
+ pnpacpi_parse_allocated_memresource(res_table,
+ p->minimum, p->address_length);
+ else if (p->resource_type == ACPI_IO_RANGE)
+ pnpacpi_parse_allocated_ioresource(res_table,
+ p->minimum, p->address_length);
+}
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
- struct pnp_resource_table * res_table = (struct pnp_resource_table *)data;
+ struct pnp_resource_table *res_table = (struct pnp_resource_table *)data;
int i;
switch (res->type) {
@@ -221,19 +242,9 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
res->data.fixed_memory32.address_length);
break;
case ACPI_RESOURCE_TYPE_ADDRESS16:
- pnpacpi_parse_allocated_memresource(res_table,
- res->data.address16.minimum,
- res->data.address16.address_length);
- break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
- pnpacpi_parse_allocated_memresource(res_table,
- res->data.address32.minimum,
- res->data.address32.address_length);
- break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
- pnpacpi_parse_allocated_memresource(res_table,
- res->data.address64.minimum,
- res->data.address64.address_length);
+ pnpacpi_parse_allocated_address_space(res_table, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
@@ -255,11 +266,11 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
pnp_warn("PnPACPI: unknown resource type %d", res->type);
return AE_ERROR;
}
-
+
return AE_OK;
}
-acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle, struct pnp_resource_table * res)
+acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle, struct pnp_resource_table *res)
{
/* Blank the resource table values */
pnp_init_resource_table(res);
@@ -317,17 +328,17 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
pnp_err("Invalid DMA transfer type");
}
- pnp_register_dma_resource(option,dma);
+ pnp_register_dma_resource(option, dma);
return;
}
-
+
static void pnpacpi_parse_irq_option(struct pnp_option *option,
struct acpi_resource_irq *p)
{
int i;
- struct pnp_irq * irq;
-
+ struct pnp_irq *irq;
+
if (p->interrupt_count == 0)
return;
irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
@@ -347,7 +358,7 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
struct acpi_resource_extended_irq *p)
{
int i;
- struct pnp_irq * irq;
+ struct pnp_irq *irq;
if (p->interrupt_count == 0)
return;
@@ -368,7 +379,7 @@ static void
pnpacpi_parse_port_option(struct pnp_option *option,
struct acpi_resource_io *io)
{
- struct pnp_port * port;
+ struct pnp_port *port;
if (io->address_length == 0)
return;
@@ -381,7 +392,7 @@ pnpacpi_parse_port_option(struct pnp_option *option,
port->size = io->address_length;
port->flags = ACPI_DECODE_16 == io->io_decode ?
PNP_PORT_FLAG_16BITADDR : 0;
- pnp_register_port_resource(option,port);
+ pnp_register_port_resource(option, port);
return;
}
@@ -389,7 +400,7 @@ static void
pnpacpi_parse_fixed_port_option(struct pnp_option *option,
struct acpi_resource_fixed_io *io)
{
- struct pnp_port * port;
+ struct pnp_port *port;
if (io->address_length == 0)
return;
@@ -400,7 +411,7 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
port->size = io->address_length;
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
- pnp_register_port_resource(option,port);
+ pnp_register_port_resource(option, port);
return;
}
@@ -408,7 +419,7 @@ static void
pnpacpi_parse_mem24_option(struct pnp_option *option,
struct acpi_resource_memory24 *p)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
if (p->address_length == 0)
return;
@@ -423,7 +434,7 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
return;
}
@@ -431,7 +442,7 @@ static void
pnpacpi_parse_mem32_option(struct pnp_option *option,
struct acpi_resource_memory32 *p)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
if (p->address_length == 0)
return;
@@ -446,7 +457,7 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
return;
}
@@ -454,7 +465,7 @@ static void
pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
struct acpi_resource_fixed_memory32 *p)
{
- struct pnp_mem * mem;
+ struct pnp_mem *mem;
if (p->address_length == 0)
return;
@@ -468,7 +479,7 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
return;
}
@@ -477,8 +488,8 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
- struct pnp_mem * mem;
- struct pnp_port * port;
+ struct pnp_mem *mem;
+ struct pnp_port *port;
status = acpi_resource_to_address64(r, p);
if (!ACPI_SUCCESS(status)) {
@@ -498,7 +509,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
mem->align = 0;
mem->flags = (p->info.mem.write_protect ==
ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE : 0;
- pnp_register_mem_resource(option,mem);
+ pnp_register_mem_resource(option, mem);
} else if (p->resource_type == ACPI_IO_RANGE) {
port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
if (!port)
@@ -507,7 +518,7 @@ pnpacpi_parse_address_option(struct pnp_option *option, struct acpi_resource *r)
port->size = p->address_length;
port->align = 0;
port->flags = PNP_PORT_FLAG_FIXED;
- pnp_register_port_resource(option,port);
+ pnp_register_port_resource(option, port);
}
}
@@ -531,7 +542,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
break;
case ACPI_RESOURCE_TYPE_DMA:
- pnpacpi_parse_dma_option(option, &res->data.dma);
+ pnpacpi_parse_dma_option(option, &res->data.dma);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -539,7 +550,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
case ACPI_GOOD_CONFIGURATION:
priority = PNP_RES_PRIORITY_PREFERRED;
break;
-
+
case ACPI_ACCEPTABLE_CONFIGURATION:
priority = PNP_RES_PRIORITY_ACCEPTABLE;
break;
@@ -555,7 +566,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
option = pnp_register_dependent_option(dev, priority);
if (!option)
return AE_ERROR;
- parse_data->option = option;
+ parse_data->option = option;
break;
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
@@ -615,7 +626,7 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
pnp_warn("PnPACPI: unknown resource type %d", res->type);
return AE_ERROR;
}
-
+
return AE_OK;
}
@@ -636,13 +647,8 @@ acpi_status pnpacpi_parse_resource_option_data(acpi_handle handle,
return status;
}
-/*
- * Set resource
- */
-static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
- void *data)
+static int pnpacpi_supported_resource(struct acpi_resource *res)
{
- int *res_cnt = (int *)data;
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
case ACPI_RESOURCE_TYPE_DMA:
@@ -655,43 +661,32 @@ static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
- (*res_cnt) ++;
- case ACPI_RESOURCE_TYPE_START_DEPENDENT:
- case ACPI_RESOURCE_TYPE_END_DEPENDENT:
- case ACPI_RESOURCE_TYPE_VENDOR:
- case ACPI_RESOURCE_TYPE_END_TAG:
- case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
- default:
- return AE_OK;
+ return 1;
}
- return AE_OK;
+ return 0;
}
-static acpi_status pnpacpi_type_resources(struct acpi_resource *res,
+/*
+ * Set resource
+ */
+static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
void *data)
{
- struct acpi_resource **resource = (struct acpi_resource **)data;
- switch (res->type) {
- case ACPI_RESOURCE_TYPE_IRQ:
- case ACPI_RESOURCE_TYPE_DMA:
- case ACPI_RESOURCE_TYPE_IO:
- case ACPI_RESOURCE_TYPE_FIXED_IO:
- case ACPI_RESOURCE_TYPE_MEMORY24:
- case ACPI_RESOURCE_TYPE_MEMORY32:
- case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
- case ACPI_RESOURCE_TYPE_ADDRESS16:
- case ACPI_RESOURCE_TYPE_ADDRESS32:
- case ACPI_RESOURCE_TYPE_ADDRESS64:
- case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ int *res_cnt = (int *)data;
+
+ if (pnpacpi_supported_resource(res))
+ (*res_cnt)++;
+ return AE_OK;
+}
+
+static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
+{
+ struct acpi_resource **resource = (struct acpi_resource **)data;
+
+ if (pnpacpi_supported_resource(res)) {
(*resource)->type = res->type;
+ (*resource)->length = sizeof(struct acpi_resource);
(*resource)++;
- case ACPI_RESOURCE_TYPE_START_DEPENDENT:
- case ACPI_RESOURCE_TYPE_END_DEPENDENT:
- case ACPI_RESOURCE_TYPE_VENDOR:
- case ACPI_RESOURCE_TYPE_END_TAG:
- case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
- default:
- return AE_OK;
}
return AE_OK;
@@ -735,11 +730,8 @@ static void pnpacpi_encode_irq(struct acpi_resource *resource,
struct resource *p)
{
int triggering, polarity;
-
- decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering,
- &polarity);
- resource->type = ACPI_RESOURCE_TYPE_IRQ;
- resource->length = sizeof(struct acpi_resource);
+
+ decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
resource->data.irq.triggering = triggering;
resource->data.irq.polarity = polarity;
if (triggering == ACPI_EDGE_SENSITIVE)
@@ -754,11 +746,8 @@ static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
struct resource *p)
{
int triggering, polarity;
-
- decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering,
- &polarity);
- resource->type = ACPI_RESOURCE_TYPE_EXTENDED_IRQ;
- resource->length = sizeof(struct acpi_resource);
+
+ decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
resource->data.extended_irq.producer_consumer = ACPI_CONSUMER;
resource->data.extended_irq.triggering = triggering;
resource->data.extended_irq.polarity = polarity;
@@ -773,8 +762,6 @@ static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
static void pnpacpi_encode_dma(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_DMA;
- resource->length = sizeof(struct acpi_resource);
/* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
if (p->flags & IORESOURCE_DMA_COMPATIBLE)
resource->data.dma.type = ACPI_COMPATIBILITY;
@@ -798,8 +785,6 @@ static void pnpacpi_encode_dma(struct acpi_resource *resource,
static void pnpacpi_encode_io(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_IO;
- resource->length = sizeof(struct acpi_resource);
/* Note: pnp_assign_port will copy pnp_port->flags into p->flags */
resource->data.io.io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR)?
ACPI_DECODE_16 : ACPI_DECODE_10;
@@ -812,8 +797,6 @@ static void pnpacpi_encode_io(struct acpi_resource *resource,
static void pnpacpi_encode_fixed_io(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_FIXED_IO;
- resource->length = sizeof(struct acpi_resource);
resource->data.fixed_io.address = p->start;
resource->data.fixed_io.address_length = p->end - p->start + 1;
}
@@ -821,8 +804,6 @@ static void pnpacpi_encode_fixed_io(struct acpi_resource *resource,
static void pnpacpi_encode_mem24(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_MEMORY24;
- resource->length = sizeof(struct acpi_resource);
/* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */
resource->data.memory24.write_protect =
(p->flags & IORESOURCE_MEM_WRITEABLE) ?
@@ -836,8 +817,6 @@ static void pnpacpi_encode_mem24(struct acpi_resource *resource,
static void pnpacpi_encode_mem32(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_MEMORY32;
- resource->length = sizeof(struct acpi_resource);
resource->data.memory32.write_protect =
(p->flags & IORESOURCE_MEM_WRITEABLE) ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
@@ -850,8 +829,6 @@ static void pnpacpi_encode_mem32(struct acpi_resource *resource,
static void pnpacpi_encode_fixed_mem32(struct acpi_resource *resource,
struct resource *p)
{
- resource->type = ACPI_RESOURCE_TYPE_FIXED_MEMORY32;
- resource->length = sizeof(struct acpi_resource);
resource->data.fixed_memory32.write_protect =
(p->flags & IORESOURCE_MEM_WRITEABLE) ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
@@ -882,37 +859,37 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
pnp_dbg("Encode dma");
pnpacpi_encode_dma(resource,
&res_table->dma_resource[dma]);
- dma ++;
+ dma++;
break;
case ACPI_RESOURCE_TYPE_IO:
pnp_dbg("Encode io");
pnpacpi_encode_io(resource,
&res_table->port_resource[port]);
- port ++;
+ port++;
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnp_dbg("Encode fixed io");
pnpacpi_encode_fixed_io(resource,
&res_table->port_resource[port]);
- port ++;
+ port++;
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnp_dbg("Encode mem24");
pnpacpi_encode_mem24(resource,
&res_table->mem_resource[mem]);
- mem ++;
+ mem++;
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnp_dbg("Encode mem32");
pnpacpi_encode_mem32(resource,
&res_table->mem_resource[mem]);
- mem ++;
+ mem++;
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnp_dbg("Encode fixed mem32");
pnpacpi_encode_fixed_mem32(resource,
&res_table->mem_resource[mem]);
- mem ++;
+ mem++;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnp_dbg("Encode ext irq");
@@ -933,8 +910,8 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
pnp_warn("unknown resource type %d", resource->type);
return -EINVAL;
}
- resource ++;
- i ++;
+ resource++;
+ i++;
}
return 0;
}
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index d8041952b1b..7f22a06fe5e 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -474,8 +474,7 @@ NCR_700_readl(struct Scsi_Host *host, __u32 reg)
ioread32(hostdata->base + reg);
#if 1
/* sanity check the register */
- if((reg & 0x3) != 0)
- BUG();
+ BUG_ON((reg & 0x3) != 0);
#endif
return value;
@@ -498,8 +497,7 @@ NCR_700_writel(__u32 value, struct Scsi_Host *host, __u32 reg)
#if 1
/* sanity check the register */
- if((reg & 0x3) != 0)
- BUG();
+ BUG_ON((reg & 0x3) != 0);
#endif
bEBus ? iowrite32be(value, hostdata->base + reg):
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ea9e038813e..83b5c7d085f 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -396,8 +396,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
@@ -956,8 +955,7 @@ static void io_callback(void *context, struct fib * fibptr)
smp_processor_id(), (unsigned long long)lba, jiffies);
}
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
if(scsicmd->use_sg)
pci_unmap_sg(dev->pdev,
@@ -1092,8 +1090,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
aac_build_sgraw(scsicmd, &readcmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
- BUG();
+ BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
@@ -1261,8 +1258,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
aac_build_sgraw(scsicmd, &writecmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
- BUG();
+ BUG_ON(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
@@ -1904,8 +1900,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index d2ef17ea44f..3f27419c66a 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -229,8 +229,7 @@ void aac_fib_init(struct fib *fibptr)
static void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib;
- if(hw_fib->header.StructType != FIB_MAGIC)
- BUG();
+ BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
hw_fib->header.XferState = 0;
}
@@ -530,8 +529,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
} else
down(&fibptr->event_wait);
- if(fibptr->done == 0)
- BUG();
+ BUG_ON(fibptr->done == 0);
if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
return -ETIMEDOUT;
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index b10750bb5c0..8caa5903ce3 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -118,8 +118,7 @@ int __queue_add(Queue_t *queue, Scsi_Cmnd *SCpnt, int head)
list_del(l);
q = list_entry(l, QE_t, list);
- if (BAD_MAGIC(q, QUEUE_MAGIC_FREE))
- BUG();
+ BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE));
SET_MAGIC(q, QUEUE_MAGIC_USED);
q->SCpnt = SCpnt;
@@ -144,8 +143,7 @@ static Scsi_Cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
*/
list_del(ent);
q = list_entry(ent, QE_t, list);
- if (BAD_MAGIC(q, QUEUE_MAGIC_USED))
- BUG();
+ BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED));
SET_MAGIC(q, QUEUE_MAGIC_FREE);
list_add(ent, &queue->free);
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 0c6dc31bb14..115f55471ed 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -2241,8 +2241,7 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
int host_index;
unsigned long imm_command;
- if (cmd == NULL)
- BUG();
+ BUG_ON(cmd == NULL);
ticks = IM_RESET_DELAY * HZ;
shpnt = cmd->device->host;
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 39b760a2424..988e6f7af01 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -600,8 +600,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
"issuing a packet command\n");
return ide_do_reset (drive);
}
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
/* Set the interrupt routine */
ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
/* Send the actual packet */
@@ -691,8 +690,7 @@ static ide_startstop_t idescsi_issue_pc (ide_drive_t *drive, idescsi_pc_t *pc)
set_bit(PC_DMA_OK, &pc->flags);
if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) {
- if (HWGROUP(drive)->handler != NULL)
- BUG();
+ BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &idescsi_transfer_pc,
get_timeout(pc), idescsi_expiry);
/* Issue the packet command */
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index c33857e7b33..5d2cefb5e52 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1828,7 +1828,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
scb->dma_type = MEGA_SGLIST;
- if( sgcnt > adapter->sglen ) BUG();
+ BUG_ON(sgcnt > adapter->sglen);
*len = 0;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index ce0ba3a174f..4a2fed350d4 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -4724,7 +4724,7 @@ err_out:
/* Flush the tape buffer before close */
-static int os_scsi_tape_flush(struct file * filp)
+static int os_scsi_tape_flush(struct file * filp, fl_owner_t id)
{
int result = 0, result2;
struct osst_tape * STp = filp->private_data;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ad87d73f88e..1272dd249af 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1193,7 +1193,7 @@ static int st_open(struct inode *inode, struct file *filp)
/* Flush the tape buffer before close */
-static int st_flush(struct file *filp)
+static int st_flush(struct file *filp, fl_owner_t id)
{
int result = 0, result2;
unsigned char cmd[MAX_COMMAND_SIZE];
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 9341703dee0..27307fe5a4c 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -939,6 +939,7 @@ wd33c93_intr(struct Scsi_Host *instance)
DB(DB_INTR, printk("%02x", cmd->SCp.Status))
if (hostdata->level2 >= L2_BASIC) {
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
+ udelay(7);
hostdata->state = S_RUNNING_LEVEL2;
write_wd33c93(regs, WD_COMMAND_PHASE, 0x50);
write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
@@ -955,6 +956,7 @@ wd33c93_intr(struct Scsi_Host *instance)
msg = read_1_byte(regs);
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
+ udelay(7);
hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
@@ -1358,6 +1360,7 @@ wd33c93_intr(struct Scsi_Host *instance)
} else {
/* Verify this is a change to MSG_IN and read the message */
sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ udelay(7);
if (sr == (CSR_ABORT | PHS_MESS_IN) ||
sr == (CSR_UNEXP | PHS_MESS_IN) ||
sr == (CSR_SRV_REQ | PHS_MESS_IN)) {
@@ -1374,6 +1377,7 @@ wd33c93_intr(struct Scsi_Host *instance)
asr);
}
sr = read_wd33c93(regs, WD_SCSI_STATUS);
+ udelay(7);
if (sr != CSR_MSGIN)
printk
("wd33c93: Not paused with ACK on RESEL (%02x)\n",
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index c620209d7b9..717e47bbd78 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2646,7 +2646,10 @@ static int ioc4_serial_remove_one(struct ioc4_driver_data *idd)
struct ioc4_port *port;
struct ioc4_soft *soft;
+ /* If serial driver did not attach, don't try to detach */
control = idd->idd_serial_data;
+ if (!control)
+ return 0;
for (port_num = 0; port_num < IOC4_NUM_SERIAL_PORTS; port_num++) {
for (port_type = UART_PORT_MIN;
@@ -2778,6 +2781,12 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, idd->idd_pdev,
idd->idd_pci_id));
+ /* PCI-RT does not bring out serial connections.
+ * Do not attach to this particular IOC4.
+ */
+ if (idd->idd_variant == IOC4_VARIANT_PCI_RT)
+ return 0;
+
/* request serial registers */
tmp_addr1 = idd->idd_bar0 + IOC4_SERIAL_OFFSET;
diff --git a/drivers/sn/ioc4.c b/drivers/sn/ioc4.c
index cdeff909403..8256a97eb50 100644
--- a/drivers/sn/ioc4.c
+++ b/drivers/sn/ioc4.c
@@ -160,9 +160,6 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
writel(0, &idd->idd_misc_regs->int_out.raw);
mmiowb();
- printk(KERN_INFO
- "%s: Calibrating PCI bus speed "
- "for pci_dev %s ... ", __FUNCTION__, pci_name(idd->idd_pdev));
/* Set up square wave */
int_out.raw = 0;
int_out.fields.count = IOC4_CALIBRATE_COUNT;
@@ -206,11 +203,16 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
/* Bounds check the result. */
if (period > IOC4_CALIBRATE_LOW_LIMIT ||
period < IOC4_CALIBRATE_HIGH_LIMIT) {
- printk("failed. Assuming PCI clock ticks are %d ns.\n",
+ printk(KERN_INFO
+ "IOC4 %s: Clock calibration failed. Assuming"
+ "PCI clock is %d ns.\n",
+ pci_name(idd->idd_pdev),
IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
period = IOC4_CALIBRATE_DEFAULT;
} else {
- printk("succeeded. PCI clock ticks are %ld ns.\n",
+ printk(KERN_DEBUG
+ "IOC4 %s: PCI clock is %ld ns.\n",
+ pci_name(idd->idd_pdev),
period / IOC4_EXTINT_COUNT_DIVISOR);
}
@@ -222,6 +224,51 @@ ioc4_clock_calibrate(struct ioc4_driver_data *idd)
idd->count_period = period;
}
+/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
+ * Each brings out different combinations of IOC4 signals, thus
+ * the IOC4 subdrivers need to know to which we're attached.
+ *
+ * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
+ * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
+ * If neither is present, it's a PCI-RT.
+ */
+static unsigned int
+ioc4_variant(struct ioc4_driver_data *idd)
+{
+ struct pci_dev *pdev = NULL;
+ int found = 0;
+
+ /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
+ if (pdev &&
+ idd->idd_pdev->bus->number == pdev->bus->number &&
+ 3 == PCI_SLOT(pdev->devfn))
+ found = 1;
+ pci_dev_put(pdev);
+ } while (pdev && !found);
+ if (NULL != pdev)
+ return IOC4_VARIANT_IO9;
+
+ /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
+ pdev = NULL;
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
+ PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
+ if (pdev &&
+ idd->idd_pdev->bus->number == pdev->bus->number &&
+ 3 == PCI_SLOT(pdev->devfn))
+ found = 1;
+ pci_dev_put(pdev);
+ } while (pdev && !found);
+ if (NULL != pdev)
+ return IOC4_VARIANT_IO10;
+
+ /* PCI-RT: No SCSI/SATA controller will be present */
+ return IOC4_VARIANT_PCI_RT;
+}
+
/* Adds a new instance of an IOC4 card */
static int
ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
@@ -286,6 +333,13 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
/* Failsafe portion of per-IOC4 initialization */
+ /* Detect card variant */
+ idd->idd_variant = ioc4_variant(idd);
+ printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
+ idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
+ idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
+ idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
+
/* Initialize IOC4 */
pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 3cf945cc5b9..95f5ad923b0 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -543,10 +543,10 @@ static void fs_remove_file (struct dentry *dentry)
/* --------------------------------------------------------------------- */
-static struct super_block *usb_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int usb_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, usbfs_fill_super);
+ return get_sb_single(fs_type, flags, data, usbfs_fill_super, mnt);
}
static struct file_system_type usb_fs_type = {
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index aef0722b8f1..3bdc5e3ba23 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2070,11 +2070,11 @@ enomem0:
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
-static struct super_block *
+static int
gadgetfs_get_sb (struct file_system_type *t, int flags,
- const char *path, void *opts)
+ const char *path, void *opts, struct vfsmount *mnt)
{
- return get_sb_single (t, flags, opts, gadgetfs_fill_super);
+ return get_sb_single (t, flags, opts, gadgetfs_fill_super, mnt);
}
static void
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index c060eb9b3b1..b93d71d28db 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -70,7 +70,6 @@ config USB_EHCI_TT_NEWSCHED
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
depends on USB
- default N
---help---
The ISP1160 and ISP1161 chips are USB host controllers. Enable this
option if your board has this chip. If unsure, say N.
@@ -145,7 +144,6 @@ config USB_UHCI_HCD
config USB_SL811_HCD
tristate "SL811HS HCD support"
depends on USB
- default N
help
The SL811HS is a single-port USB controller that supports either
host side or peripheral side roles. Enable this option if your
@@ -158,7 +156,6 @@ config USB_SL811_HCD
config USB_SL811_CS
tristate "CF/PCMCIA support for SL811HS HCD"
depends on USB_SL811_HCD && PCMCIA
- default N
help
Wraps a PCMCIA driver around the SL811HS HCD, supporting the RATOC
REX-CFU1U CF card (often used with PDAs). If unsure, say N.
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index d9d7d3c4cae..eb6aa42be60 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -3722,7 +3722,9 @@ static int __init atyfb_init(void)
atyfb_setup(option);
#endif
+#ifdef CONFIG_PCI
pci_register_driver(&atyfb_driver);
+#endif
#ifdef CONFIG_ATARI
atyfb_atari_probe();
#endif
@@ -3731,7 +3733,9 @@ static int __init atyfb_init(void)
static void __exit atyfb_exit(void)
{
+#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
+#endif
}
module_init(atyfb_init);
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index f2d9a08e89a..8b3d0f0c7bd 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -2,6 +2,7 @@ menu "Dallas's 1-wire bus"
config W1
tristate "Dallas's 1-wire support"
+ depends on CONNECTOR
---help---
Dallas' 1-wire bus is useful to connect slow 1-pin devices
such as iButtons and thermal sensors.
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 61c599b4a1e..872943004e5 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -99,12 +99,13 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
* @flags: mount flags
* @dev_name: device name that was mounted
* @data: mount options
+ * @mnt: mountpoint record to be instantiated
*
*/
-static struct super_block *v9fs_get_sb(struct file_system_type
- *fs_type, int flags,
- const char *dev_name, void *data)
+static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
struct super_block *sb = NULL;
struct v9fs_fcall *fcall = NULL;
@@ -123,17 +124,19 @@ static struct super_block *v9fs_get_sb(struct file_system_type
v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) {
dprintk(DEBUG_ERROR, "problem initiating session\n");
- sb = ERR_PTR(newfid);
+ retval = newfid;
goto out_free_session;
}
sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
- if (IS_ERR(sb))
+ if (IS_ERR(sb)) {
+ retval = PTR_ERR(sb);
goto out_close_session;
+ }
v9fs_fill_super(sb, v9ses, flags);
inode = v9fs_get_inode(sb, S_IFDIR | mode);
@@ -184,19 +187,19 @@ static struct super_block *v9fs_get_sb(struct file_system_type
goto put_back_sb;
}
- return sb;
+ return simple_set_mnt(mnt, sb);
out_close_session:
v9fs_session_close(v9ses);
out_free_session:
kfree(v9ses);
- return sb;
+ return retval;
put_back_sb:
/* deactivate_super calls v9fs_kill_super which will frees the rest */
up_write(&sb->s_umount);
deactivate_super(sb);
- return ERR_PTR(retval);
+ return retval;
}
/**
diff --git a/fs/Kconfig b/fs/Kconfig
index 20f9b557732..2aa4624cc01 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -53,7 +53,7 @@ config EXT2_FS_SECURITY
config EXT2_FS_XIP
bool "Ext2 execute in place support"
- depends on EXT2_FS
+ depends on EXT2_FS && MMU
help
Execute in place can be used on memory-backed block devices. If you
enable this option, you can select to mount block devices which are
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 252abda0d20..ba1c88af49f 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -196,17 +196,17 @@ static int adfs_remount(struct super_block *sb, int *flags, char *data)
return parse_options(sb, data);
}
-static int adfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct adfs_sb_info *asb = ADFS_SB(sb);
+ struct adfs_sb_info *asb = ADFS_SB(dentry->d_sb);
buf->f_type = ADFS_SUPER_MAGIC;
buf->f_namelen = asb->s_namelen;
- buf->f_bsize = sb->s_blocksize;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_blocks = asb->s_size;
buf->f_files = asb->s_ids_per_zone * asb->s_map_size;
buf->f_bavail =
- buf->f_bfree = adfs_map_free(sb);
+ buf->f_bfree = adfs_map_free(dentry->d_sb);
buf->f_ffree = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks;
return 0;
@@ -470,10 +470,11 @@ error:
return -EINVAL;
}
-static struct super_block *adfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int adfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, adfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, adfs_fill_super,
+ mnt);
}
static struct file_system_type adfs_fs_type = {
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 4d7e5b19e5c..8765cba35bb 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -18,7 +18,7 @@
extern struct timezone sys_tz;
-static int affs_statfs(struct super_block *sb, struct kstatfs *buf);
+static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int affs_remount (struct super_block *sb, int *flags, char *data);
static void
@@ -508,8 +508,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
}
static int
-affs_statfs(struct super_block *sb, struct kstatfs *buf)
+affs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
int free;
pr_debug("AFFS: statfs() partsize=%d, reserved=%d\n",AFFS_SB(sb)->s_partition_size,
@@ -524,10 +525,11 @@ affs_statfs(struct super_block *sb, struct kstatfs *buf)
return 0;
}
-static struct super_block *affs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int affs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, affs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, affs_fill_super,
+ mnt);
}
static struct file_system_type affs_fs_type = {
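adfs and affs show the companion ->statfs() change: the operation now receives the dentry being statted instead of the super_block, and implementations recover the superblock through dentry->d_sb. The pattern, sketched with a made-up example_statfs():

static int example_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;  /* what used to arrive as 'sb' */

        buf->f_type    = sb->s_magic;
        buf->f_bsize   = sb->s_blocksize;
        buf->f_namelen = 255;                   /* filesystem-specific limit */
        return 0;
}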
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index a6dff6a4f20..2fc99877cb0 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -185,9 +185,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
_enter("{%lu},%lu", dir->i_ino, index);
- page = read_cache_page(dir->i_mapping,index,
- (filler_t *) dir->i_mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(dir->i_mapping, index, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
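afs_dir_get_page() (and ext2_get_page() later in the patch) drop the open-coded read_cache_page() call that passed the mapping's own readpage as the filler, in favour of the read_mapping_page() helper. The two forms are equivalent; the helper simply supplies a_ops->readpage itself:

/* before: the caller names the filler explicitly */
page = read_cache_page(dir->i_mapping, index,
                       (filler_t *) dir->i_mapping->a_ops->readpage, NULL);

/* after: the helper picks the mapping's readpage internally */
page = read_mapping_page(dir->i_mapping, index, NULL);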
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 4e6eeb59b83..b5cf9e1205a 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -63,7 +63,6 @@ unsigned long afs_mntpt_expiry_timeout = 20;
int afs_mntpt_check_symlink(struct afs_vnode *vnode)
{
struct page *page;
- filler_t *filler;
size_t size;
char *buf;
int ret;
@@ -71,10 +70,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
_enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
/* read the contents of the symlink into the pagecache */
- filler = (filler_t *) AFS_VNODE_TO_I(vnode)->i_mapping->a_ops->readpage;
-
- page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
- filler, NULL);
+ page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -160,7 +156,6 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
struct page *page = NULL;
size_t size;
char *buf, *devname = NULL, *options = NULL;
- filler_t *filler;
int ret;
kenter("{%s}", mntpt->d_name.name);
@@ -182,9 +177,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
goto error;
/* read the contents of the AFS special symlink */
- filler = (filler_t *)mntpt->d_inode->i_mapping->a_ops->readpage;
-
- page = read_cache_page(mntpt->d_inode->i_mapping, 0, filler, NULL);
+ page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto error;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 53c56e7231a..82468df0ba5 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -38,9 +38,9 @@ struct afs_mount_params {
static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
unsigned long flags);
-static struct super_block *afs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data);
+static int afs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt);
static struct inode *afs_alloc_inode(struct super_block *sb);
@@ -294,10 +294,11 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
* get an AFS superblock
* - TODO: don't use get_sb_nodev(), but rather call sget() directly
*/
-static struct super_block *afs_get_sb(struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *options)
+static int afs_get_sb(struct file_system_type *fs_type,
+ int flags,
+ const char *dev_name,
+ void *options,
+ struct vfsmount *mnt)
{
struct afs_mount_params params;
struct super_block *sb;
@@ -311,7 +312,7 @@ static struct super_block *afs_get_sb(struct file_system_type *fs_type,
ret = afscm_start();
if (ret < 0) {
_leave(" = %d", ret);
- return ERR_PTR(ret);
+ return ret;
}
/* parse the options */
@@ -348,18 +349,19 @@ static struct super_block *afs_get_sb(struct file_system_type *fs_type,
goto error;
}
sb->s_flags |= MS_ACTIVE;
+ simple_set_mnt(mnt, sb);
afs_put_volume(params.volume);
afs_put_cell(params.default_cell);
- _leave(" = %p", sb);
- return sb;
+ _leave(" = 0 [%p]", 0, sb);
+ return 0;
error:
afs_put_volume(params.volume);
afs_put_cell(params.default_cell);
afscm_stop();
_leave(" = %d", ret);
- return ERR_PTR(ret);
+ return ret;
} /* end afs_get_sb() */
/*****************************************************************************/
diff --git a/fs/aio.c b/fs/aio.c
index e41e932ba48..8c34a62df7d 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -777,11 +777,11 @@ out:
static int __aio_run_iocbs(struct kioctx *ctx)
{
struct kiocb *iocb;
- LIST_HEAD(run_list);
+ struct list_head run_list;
assert_spin_locked(&ctx->ctx_lock);
- list_splice_init(&ctx->run_list, &run_list);
+ list_replace_init(&ctx->run_list, &run_list);
while (!list_empty(&run_list)) {
iocb = list_entry(run_list.next, struct kiocb,
ki_run_list);
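__aio_run_iocbs() moves from splicing ctx->run_list onto a pre-initialised LIST_HEAD to list_replace_init(), which transplants the whole list onto an uninitialised local head and re-initialises the source in one step, so run_list no longer needs the LIST_HEAD() initialiser. A fragment of the resulting pattern (assuming, as the assert in the hunk indicates, that ctx->ctx_lock is already held):

struct list_head run_list;                      /* note: no LIST_HEAD() needed */

assert_spin_locked(&ctx->ctx_lock);
list_replace_init(&ctx->run_list, &run_list);   /* take every entry, leave source empty */

while (!list_empty(&run_list)) {
        struct kiocb *iocb = list_entry(run_list.next, struct kiocb,
                                        ki_run_list);
        list_del_init(&iocb->ki_run_list);
        /* ... run the iocb as before ... */
}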
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index b977ece69f0..aca12375240 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -14,10 +14,10 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct super_block *autofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int autofs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, autofs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, autofs_fill_super, mnt);
}
static struct file_system_type autofs_fs_type = {
diff --git a/fs/autofs4/init.c b/fs/autofs4/init.c
index acecec8578c..5d9193332be 100644
--- a/fs/autofs4/init.c
+++ b/fs/autofs4/init.c
@@ -14,10 +14,10 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct super_block *autofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int autofs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, autofs4_fill_super);
+ return get_sb_nodev(fs_type, flags, data, autofs4_fill_super, mnt);
}
static struct file_system_type autofs_fs_type = {
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 68ebd10f345..08201fab26c 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -49,7 +49,7 @@ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
char **out, int *out_len);
static void befs_put_super(struct super_block *);
static int befs_remount(struct super_block *, int *, char *);
-static int befs_statfs(struct super_block *, struct kstatfs *);
+static int befs_statfs(struct dentry *, struct kstatfs *);
static int parse_options(char *, befs_mount_options *);
static const struct super_operations befs_sops = {
@@ -880,8 +880,9 @@ befs_remount(struct super_block *sb, int *flags, char *data)
}
static int
-befs_statfs(struct super_block *sb, struct kstatfs *buf)
+befs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
befs_debug(sb, "---> befs_statfs()");
@@ -899,11 +900,12 @@ befs_statfs(struct super_block *sb, struct kstatfs *buf)
return 0;
}
-static struct super_block *
+static int
befs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name,
- void *data)
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, befs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, befs_fill_super,
+ mnt);
}
static struct file_system_type befs_fs_type = {
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 55a7a78332f..cf74f3d4d96 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -203,8 +203,9 @@ static void bfs_put_super(struct super_block *s)
s->s_fs_info = NULL;
}
-static int bfs_statfs(struct super_block *s, struct kstatfs *buf)
+static int bfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *s = dentry->d_sb;
struct bfs_sb_info *info = BFS_SB(s);
u64 id = huge_encode_dev(s->s_bdev->bd_dev);
buf->f_type = BFS_MAGIC;
@@ -410,10 +411,10 @@ out:
return -EINVAL;
}
-static struct super_block *bfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, bfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, bfs_fill_super, mnt);
}
static struct file_system_type bfs_fs_type = {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8a04216e8b4..d0434406eae 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -38,15 +38,13 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
-
+#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
-#include <linux/elf.h>
-
-static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_elf_library(struct file*);
+static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
+static int load_elf_library(struct file *);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
@@ -59,15 +57,15 @@ extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
* don't even try.
*/
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
-static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
+static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
-# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
+#define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
-# define ELF_MIN_ALIGN PAGE_SIZE
+#define ELF_MIN_ALIGN PAGE_SIZE
#endif
#ifndef ELF_CORE_EFLAGS
@@ -86,7 +84,7 @@ static struct linux_binfmt elf_format = {
.min_coredump = ELF_EXEC_PAGESIZE
};
-#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
+#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
@@ -104,13 +102,11 @@ static int set_brk(unsigned long start, unsigned long end)
return 0;
}
-
/* We need to explicitly zero any fractional pages
after the data section (i.e. bss). This would
contain the junk from the file that should not
- be in memory */
-
-
+ be in memory
+ */
static int padzero(unsigned long elf_bss)
{
unsigned long nbyte;
@@ -129,7 +125,9 @@ static int padzero(unsigned long elf_bss)
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
-#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
+#define STACK_ALLOC(sp, len) ({ \
+ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
+ old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
@@ -138,7 +136,7 @@ static int padzero(unsigned long elf_bss)
#endif
static int
-create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
+create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
int interp_aout, unsigned long load_addr,
unsigned long interp_load_addr)
{
@@ -161,7 +159,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
* for userspace to get any other way, in others (i386) it is
* merely difficult.
*/
-
u_platform = NULL;
if (k_platform) {
size_t len = strlen(k_platform) + 1;
@@ -171,7 +168,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
* evictions by the processes running on the same package. One
* thing we can do is to shuffle the initial stack for them.
*/
-
+
p = arch_align_stack(p);
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
@@ -180,9 +177,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
}
/* Create the ELF interpreter info */
- elf_info = (elf_addr_t *) current->mm->saved_auxv;
+ elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
- do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)
+ do { \
+ elf_info[ei_index++] = id; \
+ elf_info[ei_index++] = val; \
+ } while (0)
#ifdef ARCH_DLINFO
/*
@@ -195,21 +195,22 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
- NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
+ NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
NEW_AUX_ENT(AT_BASE, interp_load_addr);
NEW_AUX_ENT(AT_FLAGS, 0);
NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
- NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
- NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
- NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
- NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
- NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
+ NEW_AUX_ENT(AT_UID, tsk->uid);
+ NEW_AUX_ENT(AT_EUID, tsk->euid);
+ NEW_AUX_ENT(AT_GID, tsk->gid);
+ NEW_AUX_ENT(AT_EGID, tsk->egid);
+ NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
if (k_platform) {
- NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
+ NEW_AUX_ENT(AT_PLATFORM,
+ (elf_addr_t)(unsigned long)u_platform);
}
if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
- NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
+ NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
@@ -232,7 +233,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
sp = (elf_addr_t __user *)bprm->p - items - ei_index;
- bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
+ bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
sp = (elf_addr_t __user *)bprm->p;
#endif
@@ -285,7 +286,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
#ifndef elf_map
static unsigned long elf_map(struct file *filep, unsigned long addr,
- struct elf_phdr *eppnt, int prot, int type)
+ struct elf_phdr *eppnt, int prot, int type)
{
unsigned long map_addr;
unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
@@ -310,9 +311,8 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
is only provided so that we can read a.out libraries that have
an ELF header */
-static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
- struct file * interpreter,
- unsigned long *interp_load_addr)
+static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+ struct file *interpreter, unsigned long *interp_load_addr)
{
struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
@@ -342,15 +342,15 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
goto out;
/* Now read in all of the header information */
-
size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
if (size > ELF_MIN_ALIGN)
goto out;
- elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+ elf_phdata = kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
- retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
+ retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
+ (char *)elf_phdata, size);
error = -EIO;
if (retval != size) {
if (retval < 0)
@@ -359,58 +359,65 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
}
eppnt = elf_phdata;
- for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
- if (eppnt->p_type == PT_LOAD) {
- int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
- int elf_prot = 0;
- unsigned long vaddr = 0;
- unsigned long k, map_addr;
-
- if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
- if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
- if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
- vaddr = eppnt->p_vaddr;
- if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
- elf_type |= MAP_FIXED;
-
- map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
- error = map_addr;
- if (BAD_ADDR(map_addr))
- goto out_close;
-
- if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
- load_addr = map_addr - ELF_PAGESTART(vaddr);
- load_addr_set = 1;
- }
-
- /*
- * Check to see if the section's size will overflow the
- * allowed task size. Note that p_filesz must always be
- * <= p_memsize so it is only necessary to check p_memsz.
- */
- k = load_addr + eppnt->p_vaddr;
- if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
- eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
- error = -ENOMEM;
- goto out_close;
- }
-
- /*
- * Find the end of the file mapping for this phdr, and keep
- * track of the largest address we see for this.
- */
- k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
- if (k > elf_bss)
- elf_bss = k;
-
- /*
- * Do the same thing for the memory mapping - between
- * elf_bss and last_bss is the bss section.
- */
- k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
- if (k > last_bss)
- last_bss = k;
- }
+ for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+ int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
+ int elf_prot = 0;
+ unsigned long vaddr = 0;
+ unsigned long k, map_addr;
+
+ if (eppnt->p_flags & PF_R)
+ elf_prot = PROT_READ;
+ if (eppnt->p_flags & PF_W)
+ elf_prot |= PROT_WRITE;
+ if (eppnt->p_flags & PF_X)
+ elf_prot |= PROT_EXEC;
+ vaddr = eppnt->p_vaddr;
+ if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
+ elf_type |= MAP_FIXED;
+
+ map_addr = elf_map(interpreter, load_addr + vaddr,
+ eppnt, elf_prot, elf_type);
+ error = map_addr;
+ if (BAD_ADDR(map_addr))
+ goto out_close;
+
+ if (!load_addr_set &&
+ interp_elf_ex->e_type == ET_DYN) {
+ load_addr = map_addr - ELF_PAGESTART(vaddr);
+ load_addr_set = 1;
+ }
+
+ /*
+ * Check to see if the section's size will overflow the
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsize so it's only necessary to check p_memsz.
+ */
+ k = load_addr + eppnt->p_vaddr;
+ if (k > TASK_SIZE ||
+ eppnt->p_filesz > eppnt->p_memsz ||
+ eppnt->p_memsz > TASK_SIZE ||
+ TASK_SIZE - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+
+ /*
+ * Find the end of the file mapping for this phdr, and
+ * keep track of the largest address we see for this.
+ */
+ k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
+ if (k > elf_bss)
+ elf_bss = k;
+
+ /*
+ * Do the same thing for the memory mapping - between
+ * elf_bss and last_bss is the bss section.
+ */
+ k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+ if (k > last_bss)
+ last_bss = k;
+ }
}
/*
@@ -424,7 +431,8 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
goto out_close;
}
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* What we have mapped so far */
+ /* What we have mapped so far */
+ elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
/* Map the last of the bss segment */
if (last_bss > elf_bss) {
@@ -436,7 +444,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
}
*interp_load_addr = load_addr;
- error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+ error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
out_close:
kfree(elf_phdata);
@@ -444,8 +452,8 @@ out:
return error;
}
-static unsigned long load_aout_interp(struct exec * interp_ex,
- struct file * interpreter)
+static unsigned long load_aout_interp(struct exec *interp_ex,
+ struct file *interpreter)
{
unsigned long text_data, elf_entry = ~0UL;
char __user * addr;
@@ -464,7 +472,7 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
case ZMAGIC:
case QMAGIC:
offset = N_TXTOFF(*interp_ex);
- addr = (char __user *) N_TXTADDR(*interp_ex);
+ addr = (char __user *)N_TXTADDR(*interp_ex);
break;
default:
goto out;
@@ -480,7 +488,6 @@ static unsigned long load_aout_interp(struct exec * interp_ex,
flush_icache_range((unsigned long)addr,
(unsigned long)addr + text_data);
-
down_write(&current->mm->mmap_sem);
do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
interp_ex->a_bss);
@@ -519,7 +526,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
#endif
}
-static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
struct file *interpreter = NULL; /* to shut gcc up */
unsigned long load_addr = 0, load_bias = 0;
@@ -528,7 +535,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter = 0;
unsigned long error;
- struct elf_phdr * elf_ppnt, *elf_phdata;
+ struct elf_phdr *elf_ppnt, *elf_phdata;
unsigned long elf_bss, elf_brk;
int elf_exec_fileno;
int retval, i;
@@ -553,7 +560,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
/* Get the exec-header */
- loc->elf_ex = *((struct elfhdr *) bprm->buf);
+ loc->elf_ex = *((struct elfhdr *)bprm->buf);
retval = -ENOEXEC;
/* First of all, some simple consistency checks */
@@ -568,7 +575,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out;
/* Now read in all of the header information */
-
if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
goto out;
if (loc->elf_ex.e_phnum < 1 ||
@@ -576,18 +582,19 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out;
size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
retval = -ENOMEM;
- elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
+ elf_phdata = kmalloc(size, GFP_KERNEL);
if (!elf_phdata)
goto out;
- retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
+ retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
+ (char *)elf_phdata, size);
if (retval != size) {
if (retval >= 0)
retval = -EIO;
goto out_free_ph;
}
- files = current->files; /* Refcounted so ok */
+ files = current->files; /* Refcounted so ok */
retval = unshare_files();
if (retval < 0)
goto out_free_ph;
@@ -598,7 +605,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* exec will make our files private anyway, but for the a.out
loader stuff we need to do it earlier */
-
retval = get_unused_fd();
if (retval < 0)
goto out_free_fh;
@@ -620,7 +626,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
* shared libraries - for now assume that this
* is an a.out format binary
*/
-
retval = -ENOEXEC;
if (elf_ppnt->p_filesz > PATH_MAX ||
elf_ppnt->p_filesz < 2)
@@ -628,13 +633,13 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
retval = -ENOMEM;
elf_interpreter = kmalloc(elf_ppnt->p_filesz,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!elf_interpreter)
goto out_free_file;
retval = kernel_read(bprm->file, elf_ppnt->p_offset,
- elf_interpreter,
- elf_ppnt->p_filesz);
+ elf_interpreter,
+ elf_ppnt->p_filesz);
if (retval != elf_ppnt->p_filesz) {
if (retval >= 0)
retval = -EIO;
@@ -678,7 +683,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
goto out_free_interp;
- retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
+ retval = kernel_read(interpreter, 0, bprm->buf,
+ BINPRM_BUF_SIZE);
if (retval != BINPRM_BUF_SIZE) {
if (retval >= 0)
retval = -EIO;
@@ -686,8 +692,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
/* Get the exec headers */
- loc->interp_ex = *((struct exec *) bprm->buf);
- loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
+ loc->interp_ex = *((struct exec *)bprm->buf);
+ loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
break;
}
elf_ppnt++;
@@ -739,7 +745,6 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* OK, we are done with that, now set up the arg stuff,
and then start this sucker up */
-
if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
char *passed_p = passed_fileno;
sprintf(passed_fileno, "%d", elf_exec_fileno);
@@ -777,7 +782,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
- if ( !(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
arch_pick_mmap_layout(current->mm);
@@ -798,8 +803,8 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
the correct location in memory. At this point, we assume that
the image should be loaded at fixed address, not at a variable
address. */
-
- for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
+ for (i = 0, elf_ppnt = elf_phdata;
+ i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
@@ -827,30 +832,35 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
load_bias, nbyte)) {
/*
* This bss-zeroing can fail if the ELF
- * file specifies odd protections. So
+ * file specifies odd protections. So
* we don't check the return value
*/
}
}
}
- if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
- if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
- if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+ if (elf_ppnt->p_flags & PF_R)
+ elf_prot |= PROT_READ;
+ if (elf_ppnt->p_flags & PF_W)
+ elf_prot |= PROT_WRITE;
+ if (elf_ppnt->p_flags & PF_X)
+ elf_prot |= PROT_EXEC;
- elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;
+ elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the default mmap
- base, as well as whatever program they might try to exec. This
- is because the brk will follow the loader, and is not movable. */
+ /* Try and get dynamic programs out of the way of the
+ * default mmap base, as well as whatever program they
+ * might try to exec. This is because the brk will
+ * follow the loader, and is not movable. */
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
}
- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
+ elf_prot, elf_flags);
if (BAD_ADDR(error)) {
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
@@ -867,8 +877,10 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
}
k = elf_ppnt->p_vaddr;
- if (k < start_code) start_code = k;
- if (start_data < k) start_data = k;
+ if (k < start_code)
+ start_code = k;
+ if (start_data < k)
+ start_data = k;
/*
* Check to see if the section's size will overflow the
@@ -878,7 +890,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
elf_ppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - elf_ppnt->p_memsz < k) {
- /* set_brk can never work. Avoid overflows. */
+ /* set_brk can never work. Avoid overflows. */
send_sig(SIGKILL, current, 0);
goto out_free_dentry;
}
@@ -966,8 +978,9 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
- create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
- load_addr, interp_load_addr);
+ create_elf_tables(bprm, &loc->elf_ex,
+ (interpreter_type == INTERPRETER_AOUT),
+ load_addr, interp_load_addr);
/* N.B. passed_fileno might not be initialized? */
if (interpreter_type == INTERPRETER_AOUT)
current->mm->arg_start += strlen(passed_fileno) + 1;
@@ -981,7 +994,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
- emulate the SVr4 behavior. Sigh. */
+ emulate the SVr4 behavior. Sigh. */
down_write(&current->mm->mmap_sem);
error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, 0);
@@ -1036,7 +1049,6 @@ out_free_ph:
/* This is really simpleminded and specialized - we are loading an
a.out library that is given an ELF header. */
-
static int load_elf_library(struct file *file)
{
struct elf_phdr *elf_phdata;
@@ -1046,7 +1058,7 @@ static int load_elf_library(struct file *file)
struct elfhdr elf_ex;
error = -ENOEXEC;
- retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
+ retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
if (retval != sizeof(elf_ex))
goto out;
@@ -1055,7 +1067,7 @@ static int load_elf_library(struct file *file)
/* First of all, some simple consistency checks */
if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
+ !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
goto out;
/* Now read in all of the header information */
@@ -1103,7 +1115,8 @@ static int load_elf_library(struct file *file)
goto out_free_ph;
}
- len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
+ len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
+ ELF_MIN_ALIGN - 1);
bss = eppnt->p_memsz + eppnt->p_vaddr;
if (bss > len) {
down_write(&current->mm->mmap_sem);
@@ -1162,7 +1175,7 @@ static int maydump(struct vm_area_struct *vma)
if (vma->vm_flags & (VM_IO | VM_RESERVED))
return 0;
- /* Dump shared memory only if mapped from an anonymous file. */
+ /* Dump shared memory only if mapped from an anonymous file. */
if (vma->vm_flags & VM_SHARED)
return vma->vm_file->f_dentry->d_inode->i_nlink == 0;
@@ -1173,7 +1186,7 @@ static int maydump(struct vm_area_struct *vma)
return 1;
}
-#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
/* An ELF note in memory */
struct memelfnote
@@ -1276,11 +1289,11 @@ static void fill_note(struct memelfnote *note, const char *name, int type,
}
/*
- * fill up all the fields in prstatus from the given task struct, except registers
- * which need to be filled up separately.
+ * fill up all the fields in prstatus from the given task struct, except
+ * registers which need to be filled up separately.
*/
static void fill_prstatus(struct elf_prstatus *prstatus,
- struct task_struct *p, long signr)
+ struct task_struct *p, long signr)
{
prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
prstatus->pr_sigpend = p->pending.signal.sig[0];
@@ -1365,8 +1378,8 @@ struct elf_thread_status
/*
* In order to add the specific thread information for the elf file format,
- * we need to keep a linked list of every threads pr_status and then
- * create a single section for them in the final core file.
+ * we need to keep a linked list of every thread's pr_status and then create
+ * a single section for them in the final core file.
*/
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
@@ -1377,19 +1390,23 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
fill_prstatus(&t->prstatus, p, signr);
elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
- fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
+ fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
+ &(t->prstatus));
t->num_notes++;
sz += notesize(&t->notes[0]);
- if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
- fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
+ if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
+ &t->fpu))) {
+ fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
+ &(t->fpu));
t->num_notes++;
sz += notesize(&t->notes[1]);
}
#ifdef ELF_CORE_COPY_XFPREGS
if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
- fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
+ fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
+ &t->xfpu);
t->num_notes++;
sz += notesize(&t->notes[2]);
}
@@ -1404,7 +1421,7 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
* and then they are actually written out. If we run out of core limit
* we just truncate.
*/
-static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
+static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define NUM_NOTES 6
int has_dumped = 0;
@@ -1433,12 +1450,12 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
/*
* We no longer stop all VM operations.
*
- * This is because those proceses that could possibly change map_count or
- * the mmap / vma pages are now blocked in do_exit on current finishing
- * this core dump.
+ * This is because those processes that could possibly change map_count
+ * or the mmap / vma pages are now blocked in do_exit on current
+ * finishing this core dump.
*
* Only ptrace can touch these memory addresses, but it doesn't change
- * the map_count or the pages allocated. So no possibility of crashing
+ * the map_count or the pages allocated. So no possibility of crashing
* exists while dumping the mm->vm_next areas to the core file.
*/
@@ -1500,7 +1517,7 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
#endif
/* Set up header */
- fill_elf_header(elf, segs+1); /* including notes section */
+ fill_elf_header(elf, segs + 1); /* including notes section */
has_dumped = 1;
current->flags |= PF_DUMPCORE;
@@ -1510,24 +1527,24 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
* with info from their /proc.
*/
- fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
-
+ fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
fill_psinfo(psinfo, current->group_leader, current->mm);
- fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
+ fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
numnote = 2;
- auxv = (elf_addr_t *) current->mm->saved_auxv;
+ auxv = (elf_addr_t *)current->mm->saved_auxv;
i = 0;
do
i += 2;
while (auxv[i - 2] != AT_NULL);
fill_note(&notes[numnote++], "CORE", NT_AUXV,
- i * sizeof (elf_addr_t), auxv);
+ i * sizeof(elf_addr_t), auxv);
/* Try to dump the FPU. */
- if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
+ if ((prstatus->pr_fpvalid =
+ elf_core_copy_task_fpregs(current, regs, fpu)))
fill_note(notes + numnote++,
"CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
@@ -1576,8 +1593,10 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
phdr.p_memsz = sz;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
- if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
- if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
+ if (vma->vm_flags & VM_WRITE)
+ phdr.p_flags |= PF_W;
+ if (vma->vm_flags & VM_EXEC)
+ phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
DUMP_WRITE(&phdr, sizeof(phdr));
@@ -1594,7 +1613,9 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
/* write out the thread status notes section */
list_for_each(t, &thread_list) {
- struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
+ struct elf_thread_status *tmp =
+ list_entry(t, struct elf_thread_status, list);
+
for (i = 0; i < tmp->num_notes; i++)
if (!writenote(&tmp->notes[i], file))
goto end_coredump;
@@ -1611,18 +1632,19 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
for (addr = vma->vm_start;
addr < vma->vm_end;
addr += PAGE_SIZE) {
- struct page* page;
+ struct page *page;
struct vm_area_struct *vma;
if (get_user_pages(current, current->mm, addr, 1, 0, 1,
&page, &vma) <= 0) {
- DUMP_SEEK (file->f_pos + PAGE_SIZE);
+ DUMP_SEEK(file->f_pos + PAGE_SIZE);
} else {
if (page == ZERO_PAGE(addr)) {
- DUMP_SEEK (file->f_pos + PAGE_SIZE);
+ DUMP_SEEK(file->f_pos + PAGE_SIZE);
} else {
void *kaddr;
- flush_cache_page(vma, addr, page_to_pfn(page));
+ flush_cache_page(vma, addr,
+ page_to_pfn(page));
kaddr = kmap(page);
if ((size += PAGE_SIZE) > limit ||
!dump_write(file, kaddr,
@@ -1644,7 +1666,8 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
if ((off_t)file->f_pos != offset) {
/* Sanity check */
- printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+ printk(KERN_WARNING
+ "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
(off_t)file->f_pos, offset);
}
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index a2e48c999c2..eba4e23b9ca 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -435,9 +435,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
struct elf_fdpic_params *interp_params)
{
unsigned long sp, csp, nitems;
- elf_caddr_t *argv, *envp;
+ elf_caddr_t __user *argv, *envp;
size_t platform_len = 0, len;
- char *k_platform, *u_platform, *p;
+ char *k_platform;
+ char __user *u_platform, *p;
long hwcap;
int loop;
@@ -462,12 +463,11 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
if (k_platform) {
platform_len = strlen(k_platform) + 1;
sp -= platform_len;
+ u_platform = (char __user *) sp;
if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
return -EFAULT;
}
- u_platform = (char *) sp;
-
#if defined(__i386__) && defined(CONFIG_SMP)
/* in some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
* by the processes running on the same package. One thing we can do
@@ -490,7 +490,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp = (sp - len) & ~7UL;
exec_params->map_addr = sp;
- if (copy_to_user((void *) sp, exec_params->loadmap, len) != 0)
+ if (copy_to_user((void __user *) sp, exec_params->loadmap, len) != 0)
return -EFAULT;
current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;
@@ -501,7 +501,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp = (sp - len) & ~7UL;
interp_params->map_addr = sp;
- if (copy_to_user((void *) sp, interp_params->loadmap, len) != 0)
+ if (copy_to_user((void __user *) sp, interp_params->loadmap, len) != 0)
return -EFAULT;
current->mm->context.interp_fdpic_loadmap = (unsigned long) sp;
@@ -527,7 +527,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* put the ELF interpreter info on the stack */
#define NEW_AUX_ENT(nr, id, val) \
do { \
- struct { unsigned long _id, _val; } *ent = (void *) csp; \
+ struct { unsigned long _id, _val; } __user *ent = (void __user *) csp; \
__put_user((id), &ent[nr]._id); \
__put_user((val), &ent[nr]._val); \
} while (0)
@@ -564,13 +564,13 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* allocate room for argv[] and envv[] */
csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
- envp = (elf_caddr_t *) csp;
+ envp = (elf_caddr_t __user *) csp;
csp -= (bprm->argc + 1) * sizeof(elf_caddr_t);
- argv = (elf_caddr_t *) csp;
+ argv = (elf_caddr_t __user *) csp;
/* stack argc */
csp -= sizeof(unsigned long);
- __put_user(bprm->argc, (unsigned long *) csp);
+ __put_user(bprm->argc, (unsigned long __user *) csp);
BUG_ON(csp != sp);
@@ -581,7 +581,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
current->mm->arg_start = current->mm->start_stack - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p);
#endif
- p = (char *) current->mm->arg_start;
+ p = (char __user *) current->mm->arg_start;
for (loop = bprm->argc; loop > 0; loop--) {
__put_user((elf_caddr_t) p, argv++);
len = strnlen_user(p, PAGE_SIZE * MAX_ARG_PAGES);
@@ -1025,7 +1025,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
/* clear the bit between beginning of mapping and beginning of PT_LOAD */
if (prot & PROT_WRITE && disp > 0) {
kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
- clear_user((void *) maddr, disp);
+ clear_user((void __user *) maddr, disp);
maddr += disp;
}
@@ -1059,7 +1059,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
if (prot & PROT_WRITE && excess1 > 0) {
kdebug("clear[%d] ad=%lx sz=%lx",
loop, maddr + phdr->p_filesz, excess1);
- clear_user((void *) maddr + phdr->p_filesz, excess1);
+ clear_user((void __user *) maddr + phdr->p_filesz, excess1);
}
#else
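Alongside the sparse __user annotations, the elf_fdpic hunks fix an ordering problem in create_elf_fdpic_tables(): u_platform used to be assigned from sp only after __copy_to_user() had already been called with it, and is now set up first. The corrected sequence, as it reads after the patch:

if (k_platform) {
        platform_len = strlen(k_platform) + 1;
        sp -= platform_len;
        u_platform = (char __user *) sp;        /* set the destination first */
        if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
                return -EFAULT;
}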
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 599f36fd0f6..07a4996cca3 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -739,10 +739,10 @@ static int bm_fill_super(struct super_block * sb, void * data, int silent)
return err;
}
-static struct super_block *bm_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bm_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, bm_fill_super);
+ return get_sb_single(fs_type, flags, data, bm_fill_super, mnt);
}
static struct linux_binfmt misc_format = {
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 44aaba202f7..028d9fb9c2d 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -300,10 +300,10 @@ static struct super_operations bdev_sops = {
.clear_inode = bdev_clear_inode,
};
-static struct super_block *bd_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int bd_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+ return get_sb_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576, mnt);
}
static struct file_system_type bd_type = {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c262d8874ce..7520f468715 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -166,8 +166,9 @@ cifs_put_super(struct super_block *sb)
}
static int
-cifs_statfs(struct super_block *sb, struct kstatfs *buf)
+cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
int xid;
int rc = -EOPNOTSUPP;
struct cifs_sb_info *cifs_sb;
@@ -460,9 +461,9 @@ struct super_operations cifs_super_ops = {
.remount_fs = cifs_remount,
};
-static struct super_block *
+static int
cifs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
int rc;
struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
@@ -470,7 +471,7 @@ cifs_get_sb(struct file_system_type *fs_type,
cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
if (IS_ERR(sb))
- return sb;
+ return PTR_ERR(sb);
sb->s_flags = flags;
@@ -478,10 +479,10 @@ cifs_get_sb(struct file_system_type *fs_type,
if (rc) {
up_write(&sb->s_umount);
deactivate_super(sb);
- return ERR_PTR(rc);
+ return rc;
}
sb->s_flags |= MS_ACTIVE;
- return sb;
+ return simple_set_mnt(mnt, sb);
}
static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c98755dca86..d56c0577c71 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -74,7 +74,7 @@ extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
size_t write_size, loff_t * poffset);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, struct dentry *, int);
-extern int cifs_flush(struct file *);
+extern int cifs_flush(struct file *, fl_owner_t id);
extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
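cifs_flush() (and coda_flush() just below) grow a second parameter because the VFS ->flush() file operation now passes an fl_owner_t identifying the owner on whose behalf the file is being closed; the hunks shown only adjust the signatures. Assuming the file_operations member changed in the same way, the prototypes line up as:

/* old */ int (*flush) (struct file *);
/* new */ int (*flush) (struct file *, fl_owner_t id);

int cifs_flush(struct file *file, fl_owner_t id);   /* declaration from cifsfs.h above */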
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2b4ce1dad6..b4a18c1cab0 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1079,9 +1079,9 @@ static int cifs_writepages(struct address_space *mapping,
unsigned int bytes_written;
struct cifs_sb_info *cifs_sb;
int done = 0;
- pgoff_t end = -1;
+ pgoff_t end;
pgoff_t index;
- int is_range = 0;
+ int range_whole = 0;
struct kvec iov[32];
int len;
int n_iov = 0;
@@ -1122,16 +1122,14 @@ static int cifs_writepages(struct address_space *mapping,
xid = GetXid();
pagevec_init(&pvec, 0);
- if (wbc->sync_mode == WB_SYNC_NONE)
+ if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
- else {
- index = 0;
- scanned = 1;
- }
- if (wbc->start || wbc->end) {
- index = wbc->start >> PAGE_CACHE_SHIFT;
- end = wbc->end >> PAGE_CACHE_SHIFT;
- is_range = 1;
+ end = -1;
+ } else {
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
scanned = 1;
}
retry:
@@ -1167,7 +1165,7 @@ retry:
break;
}
- if (unlikely(is_range) && (page->index > end)) {
+ if (!wbc->range_cyclic && page->index > end) {
done = 1;
unlock_page(page);
break;
@@ -1271,7 +1269,7 @@ retry:
index = 0;
goto retry;
}
- if (!is_range)
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
FreeXid(xid);
@@ -1419,7 +1417,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
* As file closes, flush all cached write data for this inode checking
* for write behind errors.
*/
-int cifs_flush(struct file *file)
+int cifs_flush(struct file *file, fl_owner_t id)
{
struct inode * inode = file->f_dentry->d_inode;
int rc = 0;
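cifs_writepages() is converted to the reworked struct writeback_control: the old start/end pair and the local is_range flag are replaced by range_start/range_end plus a range_cyclic mode, with a range of 0..LLONG_MAX meaning the whole file. The range-selection logic the hunk arrives at, shown as a fragment:

pgoff_t index, end;
int range_whole = 0;

if (wbc->range_cyclic) {
        index = mapping->writeback_index;       /* resume where the last pass stopped */
        end = -1;
} else {
        index = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = wbc->range_end >> PAGE_CACHE_SHIFT;
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
}

/* ... write back pages from index to end ... */

if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        mapping->writeback_index = index;       /* remember the resume point */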
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 7c2642431fa..cc66c681bd1 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -164,7 +164,7 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
return 0;
}
-int coda_flush(struct file *coda_file)
+int coda_flush(struct file *coda_file, fl_owner_t id)
{
unsigned short flags = coda_file->f_flags & ~O_EXCL;
unsigned short coda_flags = coda_flags_to_cflags(flags);
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index ada1a81df6b..87f1dc8aa24 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -36,7 +36,7 @@
/* VFS super_block ops */
static void coda_clear_inode(struct inode *);
static void coda_put_super(struct super_block *);
-static int coda_statfs(struct super_block *sb, struct kstatfs *buf);
+static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
static kmem_cache_t * coda_inode_cachep;
@@ -278,13 +278,13 @@ struct inode_operations coda_file_inode_operations = {
.setattr = coda_setattr,
};
-static int coda_statfs(struct super_block *sb, struct kstatfs *buf)
+static int coda_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int error;
lock_kernel();
- error = venus_statfs(sb, buf);
+ error = venus_statfs(dentry, buf);
unlock_kernel();
@@ -307,10 +307,10 @@ static int coda_statfs(struct super_block *sb, struct kstatfs *buf)
/* init_coda: used by filesystems.c to register coda */
-static struct super_block *coda_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int coda_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, coda_fill_super);
+ return get_sb_nodev(fs_type, flags, data, coda_fill_super, mnt);
}
struct file_system_type coda_fs_type = {
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index 1bae99650a9..b040eba13a7 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -611,7 +611,7 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
return error;
}
-int venus_statfs(struct super_block *sb, struct kstatfs *sfs)
+int venus_statfs(struct dentry *dentry, struct kstatfs *sfs)
{
union inputArgs *inp;
union outputArgs *outp;
@@ -620,7 +620,7 @@ int venus_statfs(struct super_block *sb, struct kstatfs *sfs)
insize = max_t(unsigned int, INSIZE(statfs), OUTSIZE(statfs));
UPARG(CODA_STATFS);
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_sbp(dentry->d_sb), insize, &outsize, inp);
if (!error) {
sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
diff --git a/fs/compat.c b/fs/compat.c
index b1f64786a61..7e7e5bc4f3c 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -197,7 +197,7 @@ asmlinkage long compat_sys_statfs(const char __user *path, struct compat_statfs
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(nd.dentry, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
path_release(&nd);
@@ -215,7 +215,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(file->f_dentry, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
fput(file);
@@ -265,7 +265,7 @@ asmlinkage long compat_sys_statfs64(const char __user *path, compat_size_t sz, s
error = user_path_walk(path, &nd);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(nd.dentry, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
path_release(&nd);
@@ -286,7 +286,7 @@ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct c
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs(file->f_dentry, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
fput(file);
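The compat statfs syscalls show the caller side of the statfs change: vfs_statfs() now takes a dentry, so call sites simply pass nd.dentry or file->f_dentry instead of digging out the inode's superblock. A typical caller after the conversion (error handling trimmed):

struct kstatfs tmp;
int error;

error = vfs_statfs(nd.dentry, &tmp);    /* was vfs_statfs(nd.dentry->d_inode->i_sb, &tmp) */
if (!error)
        error = put_compat_statfs(buf, &tmp);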
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index f920d30478e..94dab7bdd85 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -103,10 +103,10 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *configfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int configfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, configfs_fill_super);
+ return get_sb_single(fs_type, flags, data, configfs_fill_super, mnt);
}
static struct file_system_type configfs_fs_type = {
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 9efcc3a164e..c45d7386080 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -181,9 +181,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
struct page *page = NULL;
if (blocknr + i < devsize) {
- page = read_cache_page(mapping, blocknr + i,
- (filler_t *)mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(mapping, blocknr + i, NULL);
/* synchronous error? */
if (IS_ERR(page))
page = NULL;
@@ -322,8 +320,10 @@ out:
return -EINVAL;
}
-static int cramfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = CRAMFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_blocks = CRAMFS_SB(sb)->blocks;
@@ -528,10 +528,11 @@ static struct super_operations cramfs_ops = {
.statfs = cramfs_statfs,
};
-static struct super_block *cramfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int cramfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super,
+ mnt);
}
static struct file_system_type cramfs_fs_type = {
diff --git a/fs/dcache.c b/fs/dcache.c
index 59dbc92c207..313b54b2b8f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -687,46 +687,6 @@ void shrink_dcache_parent(struct dentry * parent)
prune_dcache(found, parent->d_sb);
}
-/**
- * shrink_dcache_anon - further prune the cache
- * @head: head of d_hash list of dentries to prune
- *
- * Prune the dentries that are anonymous
- *
- * parsing d_hash list does not hlist_for_each_entry_rcu() as it
- * done under dcache_lock.
- *
- */
-void shrink_dcache_anon(struct super_block *sb)
-{
- struct hlist_node *lp;
- struct hlist_head *head = &sb->s_anon;
- int found;
- do {
- found = 0;
- spin_lock(&dcache_lock);
- hlist_for_each(lp, head) {
- struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
- if (!list_empty(&this->d_lru)) {
- dentry_stat.nr_unused--;
- list_del_init(&this->d_lru);
- }
-
- /*
- * move only zero ref count dentries to the end
- * of the unused list for prune_dcache
- */
- if (!atomic_read(&this->d_count)) {
- list_add_tail(&this->d_lru, &dentry_unused);
- dentry_stat.nr_unused++;
- found++;
- }
- }
- spin_unlock(&dcache_lock);
- prune_dcache(found, sb);
- } while(found);
-}
-
/*
* Scan `nr' dentries and return the number which remain.
*
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b55b4ea9a67..440128ebef3 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -111,11 +111,11 @@ static int debug_fill_super(struct super_block *sb, void *data, int silent)
return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
}
-static struct super_block *debug_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int debug_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, debug_fill_super);
+ return get_sb_single(fs_type, flags, data, debug_fill_super, mnt);
}
static struct file_system_type debug_fs_type = {
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index 52f5059c4f3..51a97f13274 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -2549,11 +2549,11 @@ static int devfs_fill_super(struct super_block *sb, void *data, int silent)
return -EINVAL;
} /* End Function devfs_fill_super */
-static struct super_block *devfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int devfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, devfs_fill_super);
+ return get_sb_single(fs_type, flags, data, devfs_fill_super, mnt);
}
static struct file_system_type devfs_fs_type = {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 14c5620b5ca..f7aef5bb584 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -130,10 +130,10 @@ fail:
return -ENOMEM;
}
-static struct super_block *devpts_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int devpts_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, devpts_fill_super);
+ return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt);
}
static struct file_system_type devpts_fs_type = {
diff --git a/fs/efs/super.c b/fs/efs/super.c
index dff623e3ddb..8ac2462ae5d 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -15,13 +15,13 @@
#include <linux/buffer_head.h>
#include <linux/vfs.h>
-static int efs_statfs(struct super_block *s, struct kstatfs *buf);
+static int efs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int efs_fill_super(struct super_block *s, void *d, int silent);
-static struct super_block *efs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int efs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, efs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, efs_fill_super, mnt);
}
static struct file_system_type efs_fs_type = {
@@ -322,8 +322,8 @@ out_no_fs:
return -EINVAL;
}
-static int efs_statfs(struct super_block *s, struct kstatfs *buf) {
- struct efs_sb_info *sb = SUPER_INFO(s);
+static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) {
+ struct efs_sb_info *sb = SUPER_INFO(dentry->d_sb);
buf->f_type = EFS_SUPER_MAGIC; /* efs magic number */
buf->f_bsize = EFS_BLOCKSIZE; /* blocksize */
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 2695337d4d6..08e7e6a555c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -268,9 +268,9 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout);
static int eventpollfs_delete_dentry(struct dentry *dentry);
static struct inode *ep_eventpoll_inode(void);
-static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data);
+static int eventpollfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt);
/*
* This semaphore is used to serialize ep_free() and eventpoll_release_file().
@@ -1595,11 +1595,12 @@ eexit_1:
}
-static struct super_block *
+static int
eventpollfs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+ const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC);
+ return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC,
+ mnt);
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d672aa9f406..3c1c9aaaca6 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -159,8 +159,7 @@ fail:
static struct page * ext2_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
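Several hunks in this series replace the open-coded read_cache_page(mapping, n, (filler_t *)mapping->a_ops->readpage, NULL) idiom with read_mapping_page(mapping, n, NULL). Judging purely from these one-for-one substitutions, read_mapping_page() is a thin wrapper equivalent to the call it replaces; the sketch below is that assumed equivalence, not the helper's actual definition:

/* Assumed equivalence, inferred from the replacements in this diff. */
static inline struct page *read_mapping_page(struct address_space *mapping,
					     unsigned long index, void *data)
{
	return read_cache_page(mapping, index,
			       (filler_t *)mapping->a_ops->readpage, data);
}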
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7e30bae174e..ee4ba759581 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -39,7 +39,7 @@
static void ext2_sync_super(struct super_block *sb,
struct ext2_super_block *es);
static int ext2_remount (struct super_block * sb, int * flags, char * data);
-static int ext2_statfs (struct super_block * sb, struct kstatfs * buf);
+static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
void ext2_error (struct super_block * sb, const char * function,
const char * fmt, ...)
@@ -834,9 +834,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
printk ("EXT2-fs: not enough memory\n");
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
sbi->s_debts = kmalloc(sbi->s_groups_count * sizeof(*sbi->s_debts),
GFP_KERNEL);
@@ -863,6 +860,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+
+ percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext2_count_free_blocks(sb));
+ percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext2_count_free_inodes(sb));
+ percpu_counter_init(&sbi->s_dirs_counter,
+ ext2_count_dirs(sb));
/*
* set up enough so that it can read an inode
*/
@@ -874,24 +878,18 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root) {
iput(root);
printk(KERN_ERR "EXT2-fs: get root inode failed\n");
- goto failed_mount2;
+ goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
dput(sb->s_root);
sb->s_root = NULL;
printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
- goto failed_mount2;
+ goto failed_mount3;
}
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_warning(sb, __FUNCTION__,
"mounting ext3 filesystem as ext2");
ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
- percpu_counter_mod(&sbi->s_freeblocks_counter,
- ext2_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
- ext2_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext2_count_dirs(sb));
return 0;
cantfind_ext2:
@@ -899,7 +897,10 @@ cantfind_ext2:
printk("VFS: Can't find an ext2 filesystem on dev %s.\n",
sb->s_id);
goto failed_mount;
-
+failed_mount3:
+ percpu_counter_destroy(&sbi->s_freeblocks_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -1038,8 +1039,9 @@ restore_opts:
return err;
}
-static int ext2_statfs (struct super_block * sb, struct kstatfs * buf)
+static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
{
+ struct super_block *sb = dentry->d_sb;
struct ext2_sb_info *sbi = EXT2_SB(sb);
unsigned long overhead;
int i;
@@ -1087,10 +1089,10 @@ static int ext2_statfs (struct super_block * sb, struct kstatfs * buf)
return 0;
}
-static struct super_block *ext2_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext2_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ext2_fill_super, mnt);
}
#ifdef CONFIG_QUOTA
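The percpu_counter changes here (and in ext3 and fs/file_table.c below) follow a single pattern: percpu_counter_init() now takes the initial value, so the separate percpu_counter_mod() call after the mount-time counting disappears, and the new failed_mount3 label destroys the counters on the error path. A condensed before/after sketch of the idiom, not literal patch text:

/* Before: zero-initialise, then adjust once the value is known. */
percpu_counter_init(&sbi->s_freeblocks_counter);
percpu_counter_mod(&sbi->s_freeblocks_counter, ext2_count_free_blocks(sb));

/* After: initialise with the counted value in one step, and tear the
 * counter down again on the failure path with percpu_counter_destroy(). */
percpu_counter_init(&sbi->s_freeblocks_counter, ext2_count_free_blocks(sb));
percpu_counter_destroy(&sbi->s_freeblocks_counter);	/* error path only */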
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f8a5266ea1f..a60cc6ec130 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -58,7 +58,7 @@ static int ext3_sync_fs(struct super_block *sb, int wait);
static const char *ext3_decode_error(struct super_block * sb, int errno,
char nbuf[16]);
static int ext3_remount (struct super_block * sb, int * flags, char * data);
-static int ext3_statfs (struct super_block * sb, struct kstatfs * buf);
+static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf);
static void ext3_unlockfs(struct super_block *sb);
static void ext3_write_super (struct super_block * sb);
static void ext3_write_super_lockfs(struct super_block *sb);
@@ -499,20 +499,21 @@ static void ext3_clear_inode(struct inode *inode)
{
struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
#ifdef CONFIG_EXT3_FS_POSIX_ACL
- if (EXT3_I(inode)->i_acl &&
- EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
- posix_acl_release(EXT3_I(inode)->i_acl);
- EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
- }
- if (EXT3_I(inode)->i_default_acl &&
- EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
- posix_acl_release(EXT3_I(inode)->i_default_acl);
- EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
- }
+ if (EXT3_I(inode)->i_acl &&
+ EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
+ posix_acl_release(EXT3_I(inode)->i_acl);
+ EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
+ }
+ if (EXT3_I(inode)->i_default_acl &&
+ EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
+ posix_acl_release(EXT3_I(inode)->i_default_acl);
+ EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
+ }
#endif
ext3_discard_reservation(inode);
EXT3_I(inode)->i_block_alloc_info = NULL;
- kfree(rsv);
+ if (unlikely(rsv))
+ kfree(rsv);
}
static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
@@ -1579,9 +1580,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount;
}
- percpu_counter_init(&sbi->s_freeblocks_counter);
- percpu_counter_init(&sbi->s_freeinodes_counter);
- percpu_counter_init(&sbi->s_dirs_counter);
bgl_lock_init(&sbi->s_blockgroup_lock);
for (i = 0; i < db_count; i++) {
@@ -1601,6 +1599,14 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+
+ percpu_counter_init(&sbi->s_freeblocks_counter,
+ ext3_count_free_blocks(sb));
+ percpu_counter_init(&sbi->s_freeinodes_counter,
+ ext3_count_free_inodes(sb));
+ percpu_counter_init(&sbi->s_dirs_counter,
+ ext3_count_dirs(sb));
+
/* per fileystem reservation list head & lock */
spin_lock_init(&sbi->s_rsv_window_lock);
sbi->s_rsv_window_root = RB_ROOT;
@@ -1639,16 +1645,16 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!test_opt(sb, NOLOAD) &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext3_load_journal(sb, es, journal_devnum))
- goto failed_mount2;
+ goto failed_mount3;
} else if (journal_inum) {
if (ext3_create_journal(sb, es, journal_inum))
- goto failed_mount2;
+ goto failed_mount3;
} else {
if (!silent)
printk (KERN_ERR
"ext3: No journal on filesystem on %s\n",
sb->s_id);
- goto failed_mount2;
+ goto failed_mount3;
}
/* We have now updated the journal if required, so we can
@@ -1671,7 +1677,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
(sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
printk(KERN_ERR "EXT3-fs: Journal does not support "
"requested data journaling mode\n");
- goto failed_mount3;
+ goto failed_mount4;
}
default:
break;
@@ -1694,13 +1700,13 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (!sb->s_root) {
printk(KERN_ERR "EXT3-fs: get root inode failed\n");
iput(root);
- goto failed_mount3;
+ goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
dput(sb->s_root);
sb->s_root = NULL;
printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n");
- goto failed_mount3;
+ goto failed_mount4;
}
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -1723,13 +1729,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback");
- percpu_counter_mod(&sbi->s_freeblocks_counter,
- ext3_count_free_blocks(sb));
- percpu_counter_mod(&sbi->s_freeinodes_counter,
- ext3_count_free_inodes(sb));
- percpu_counter_mod(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
-
lock_kernel();
return 0;
@@ -1739,8 +1738,12 @@ cantfind_ext3:
sb->s_id);
goto failed_mount;
-failed_mount3:
+failed_mount4:
journal_destroy(sbi->s_journal);
+failed_mount3:
+ percpu_counter_destroy(&sbi->s_freeblocks_counter);
+ percpu_counter_destroy(&sbi->s_freeinodes_counter);
+ percpu_counter_destroy(&sbi->s_dirs_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
@@ -2318,8 +2321,9 @@ restore_opts:
return err;
}
-static int ext3_statfs (struct super_block * sb, struct kstatfs * buf)
+static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
{
+ struct super_block *sb = dentry->d_sb;
struct ext3_sb_info *sbi = EXT3_SB(sb);
struct ext3_super_block *es = sbi->s_es;
unsigned long overhead;
@@ -2646,10 +2650,10 @@ out:
#endif
-static struct super_block *ext3_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext3_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ext3_fill_super, mnt);
}
static struct file_system_type ext3_fs_type = {
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index c1ce284f8a9..7c35d582ec1 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -539,18 +539,18 @@ static int fat_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
-static int fat_statfs(struct super_block *sb, struct kstatfs *buf)
+static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
/* If the count of free cluster is still unknown, counts it here. */
if (sbi->free_clusters == -1) {
- int err = fat_count_free_clusters(sb);
+ int err = fat_count_free_clusters(dentry->d_sb);
if (err)
return err;
}
- buf->f_type = sb->s_magic;
+ buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = sbi->cluster_size;
buf->f_blocks = sbi->max_cluster - FAT_START_ENT;
buf->f_bfree = sbi->free_clusters;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 944652e9dde..308f2b6b502 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -210,4 +210,3 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
return err;
}
-EXPORT_SYMBOL_GPL(fat_sync_bhs);
diff --git a/fs/file_table.c b/fs/file_table.c
index bcea1998b4d..506d5307108 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -300,5 +300,5 @@ void __init files_init(unsigned long mempages)
if (files_stat.max_files < NR_FILE)
files_stat.max_files = NR_FILE;
files_defer_init();
- percpu_counter_init(&nr_files);
+ percpu_counter_init(&nr_files, 0);
}
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 50aae77651b..c1be118fc06 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -71,8 +71,7 @@ vxfs_get_page(struct address_space *mapping, u_long n)
{
struct page * pp;
- pp = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ pp = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(pp)) {
wait_on_page_locked(pp);
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index b44c916d24a..b74b791fc23 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/vfs.h>
+#include <linux/mount.h>
#include "vxfs.h"
#include "vxfs_extern.h"
@@ -55,7 +56,7 @@ MODULE_ALIAS("vxfs"); /* makes mount -t vxfs autoload the module */
static void vxfs_put_super(struct super_block *);
-static int vxfs_statfs(struct super_block *, struct kstatfs *);
+static int vxfs_statfs(struct dentry *, struct kstatfs *);
static int vxfs_remount(struct super_block *, int *, char *);
static struct super_operations vxfs_super_ops = {
@@ -90,12 +91,12 @@ vxfs_put_super(struct super_block *sbp)
/**
* vxfs_statfs - get filesystem information
- * @sbp: VFS superblock
+ * @dentry: VFS dentry to locate superblock
* @bufp: output buffer
*
* Description:
* vxfs_statfs fills the statfs buffer @bufp with information
- * about the filesystem described by @sbp.
+ * about the filesystem described by @dentry.
*
* Returns:
* Zero.
@@ -107,12 +108,12 @@ vxfs_put_super(struct super_block *sbp)
* This is everything but complete...
*/
static int
-vxfs_statfs(struct super_block *sbp, struct kstatfs *bufp)
+vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
{
- struct vxfs_sb_info *infp = VXFS_SBI(sbp);
+ struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb);
bufp->f_type = VXFS_SUPER_MAGIC;
- bufp->f_bsize = sbp->s_blocksize;
+ bufp->f_bsize = dentry->d_sb->s_blocksize;
bufp->f_blocks = infp->vsi_raw->vs_dsize;
bufp->f_bfree = infp->vsi_raw->vs_free;
bufp->f_bavail = 0;
@@ -241,10 +242,11 @@ out:
/*
* The usual module blurb.
*/
-static struct super_block *vxfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int vxfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, vxfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, vxfs_fill_super,
+ mnt);
}
static struct file_system_type vxfs_fs_type = {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f3fbe2d030f..6db95cf3aaa 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -461,6 +461,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
{
struct writeback_control wbc = {
.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
};
unsigned long nr_dirty = read_page_state(nr_dirty);
unsigned long nr_unstable = read_page_state(nr_unstable);
@@ -559,6 +561,8 @@ int write_inode_now(struct inode *inode, int sync)
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = WB_SYNC_ALL,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
};
if (!mapping_cap_writeback_dirty(inode->i_mapping))
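Both fs-writeback.c hunks add explicit byte ranges to on-stack writeback_control structures: whole-file writeback is now spelled out as the range [0, LLONG_MAX] instead of being implied by zeroed fields. Combining the fields shown above, the resulting initializer looks like:

/* Whole-file, data-integrity writeback with the new range fields. */
struct writeback_control wbc = {
	.sync_mode	= WB_SYNC_ALL,
	.nr_to_write	= LONG_MAX,
	.range_start	= 0,
	.range_end	= LLONG_MAX,	/* i.e. to the end of the file */
};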
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index fc342cf7c2c..087f3b734f4 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -169,7 +169,7 @@ static int fuse_release(struct inode *inode, struct file *file)
return fuse_release_common(inode, file, 0);
}
-static int fuse_flush(struct file *file)
+static int fuse_flush(struct file *file, fl_owner_t id)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 7627022446b..a13c0f52905 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -236,8 +236,9 @@ static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr
/* fsid is left zero */
}
-static int fuse_statfs(struct super_block *sb, struct kstatfs *buf)
+static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct fuse_conn *fc = get_fuse_conn_super(sb);
struct fuse_req *req;
struct fuse_statfs_out outarg;
@@ -569,11 +570,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static struct super_block *fuse_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *raw_data)
+static int fuse_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *raw_data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super);
+ return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}
static struct file_system_type fuse_fs_type = {
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 1e44dcfe49c..13231dd5ce6 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -280,7 +280,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
block = off >> PAGE_CACHE_SHIFT;
node->page_offset = off & ~PAGE_CACHE_MASK;
for (i = 0; i < tree->pages_per_bnode; i++) {
- page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, block++, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index d20131ce4b9..40035799431 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -59,7 +59,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
unlock_new_inode(tree->inode);
mapping = tree->inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_tree;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 1181d116117..d9227bf14e8 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -80,8 +80,10 @@ static void hfs_put_super(struct super_block *sb)
*
* changed f_files/f_ffree to reflect the fs_ablock/free_ablocks.
*/
-static int hfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = HFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (u32)HFS_SB(sb)->fs_ablocks * HFS_SB(sb)->fs_div;
@@ -413,10 +415,11 @@ bail:
return res;
}
-static struct super_block *hfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, hfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, hfs_fill_super, mnt);
}
static struct file_system_type hfs_fs_type = {
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index 9fb51632303..d128a25b74d 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -31,8 +31,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
@@ -72,8 +71,8 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+ NULL);
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
@@ -119,8 +118,8 @@ found:
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
- page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
+ NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
@@ -167,7 +166,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
- page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, pnr, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
end = pptr + PAGE_CACHE_BITS / 32;
@@ -199,7 +198,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
break;
set_page_dirty(page);
kunmap(page);
- page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, ++pnr, NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 746abc9ecf7..77bf434da67 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -440,7 +440,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
block = off >> PAGE_CACHE_SHIFT;
node->page_offset = off & ~PAGE_CACHE_MASK;
for (i = 0; i < tree->pages_per_bnode; block++, i++) {
- page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, block, NULL);
if (IS_ERR(page))
goto fail;
if (PageError(page)) {
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index effa8991999..cfc852fdd1b 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -38,7 +38,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
goto free_tree;
mapping = tree->inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto free_tree;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 7843f792a4b..0a92fa2336a 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -212,8 +212,10 @@ static void hfsplus_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-static int hfsplus_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = HFSPLUS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = HFSPLUS_SB(sb).total_blocks << HFSPLUS_SB(sb).fs_shift;
@@ -450,10 +452,12 @@ static void hfsplus_destroy_inode(struct inode *inode)
#define HFSPLUS_INODE_SIZE sizeof(struct hfsplus_inode_info)
-static struct super_block *hfsplus_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hfsplus_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super,
+ mnt);
}
static struct file_system_type hfsplus_fs_type = {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index bf0f8e16e43..8e0d37743e7 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -239,7 +239,7 @@ static int read_inode(struct inode *ino)
return(err);
}
-int hostfs_statfs(struct super_block *sb, struct kstatfs *sf)
+int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
/* do_statfs uses struct statfs64 internally, but the linux kernel
* struct statfs still has 32-bit versions for most of these fields,
@@ -252,7 +252,7 @@ int hostfs_statfs(struct super_block *sb, struct kstatfs *sf)
long long f_files;
long long f_ffree;
- err = do_statfs(HOSTFS_I(sb->s_root->d_inode)->host_filename,
+ err = do_statfs(HOSTFS_I(dentry->d_sb->s_root->d_inode)->host_filename,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
&sf->f_namelen, sf->f_spare);
@@ -993,11 +993,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
return(err);
}
-static struct super_block *hostfs_read_sb(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
+static int hostfs_read_sb(struct file_system_type *type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return(get_sb_nodev(type, flags, data, hostfs_fill_sb_common));
+ return get_sb_nodev(type, flags, data, hostfs_fill_sb_common, mnt);
}
static struct file_system_type hostfs_type = {
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index d72d8c87c99..f798480a363 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -135,8 +135,9 @@ static unsigned count_bitmaps(struct super_block *s)
return count;
}
-static int hpfs_statfs(struct super_block *s, struct kstatfs *buf)
+static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *s = dentry->d_sb;
struct hpfs_sb_info *sbi = hpfs_sb(s);
lock_kernel();
@@ -662,10 +663,11 @@ bail0:
return -EINVAL;
}
-static struct super_block *hpfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hpfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, hpfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, hpfs_fill_super,
+ mnt);
}
static struct file_system_type hpfs_fs_type = {
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 5e6363be246..3a9bdf58166 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -616,7 +616,7 @@ static const struct file_operations hppfs_dir_fops = {
.fsync = hppfs_fsync,
};
-static int hppfs_statfs(struct super_block *sb, struct kstatfs *sf)
+static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
sf->f_blocks = 0;
sf->f_bfree = 0;
@@ -769,11 +769,11 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
return(err);
}
-static struct super_block *hppfs_read_super(struct file_system_type *type,
- int flags, const char *dev_name,
- void *data)
+static int hppfs_read_super(struct file_system_type *type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return(get_sb_nodev(type, flags, data, hppfs_fill_super));
+ return get_sb_nodev(type, flags, data, hppfs_fill_super, mnt);
}
static struct file_system_type hppfs_type = {
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 3a5b4e92345..e6410d8edd0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -59,7 +59,6 @@ static void huge_pagevec_release(struct pagevec *pvec)
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file->f_dentry->d_inode;
- struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
loff_t len, vma_len;
int ret;
@@ -87,9 +86,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
goto out;
- if (vma->vm_flags & VM_MAYSHARE)
- if (hugetlb_extend_reservation(info, len >> HPAGE_SHIFT) != 0)
- goto out;
+ if (vma->vm_flags & VM_MAYSHARE &&
+ hugetlb_reserve_pages(inode, vma->vm_pgoff >> (HPAGE_SHIFT-PAGE_SHIFT),
+ len >> HPAGE_SHIFT))
+ goto out;
ret = 0;
hugetlb_prefault_arch_hook(vma->vm_mm);
@@ -195,12 +195,8 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
const pgoff_t start = lstart >> HPAGE_SHIFT;
struct pagevec pvec;
pgoff_t next;
- int i;
+ int i, freed = 0;
- hugetlb_truncate_reservation(HUGETLBFS_I(inode),
- lstart >> HPAGE_SHIFT);
- if (!mapping->nrpages)
- return;
pagevec_init(&pvec, 0);
next = start;
while (1) {
@@ -221,10 +217,12 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
truncate_huge_page(page);
unlock_page(page);
hugetlb_put_quota(mapping);
+ freed++;
}
huge_pagevec_release(&pvec);
}
BUG_ON(!lstart && mapping->nrpages);
+ hugetlb_unreserve_pages(inode, start, freed);
}
static void hugetlbfs_delete_inode(struct inode *inode)
@@ -366,6 +364,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
switch (mode & S_IFMT) {
@@ -467,9 +466,9 @@ static int hugetlbfs_set_page_dirty(struct page *page)
return 0;
}
-static int hugetlbfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
+ struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
buf->f_type = HUGETLBFS_MAGIC;
buf->f_bsize = HPAGE_SIZE;
@@ -538,7 +537,6 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
- p->prereserved_hpages = 0;
return &p->vfs_inode;
}
@@ -723,10 +721,10 @@ void hugetlb_put_quota(struct address_space *mapping)
}
}
-static struct super_block *hugetlbfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int hugetlbfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super, mnt);
}
static struct file_system_type hugetlbfs_fs_type = {
@@ -781,8 +779,7 @@ struct file *hugetlb_zero_setup(size_t size)
goto out_file;
error = -ENOMEM;
- if (hugetlb_extend_reservation(HUGETLBFS_I(inode),
- size >> HPAGE_SHIFT) != 0)
+ if (hugetlb_reserve_pages(inode, 0, size >> HPAGE_SHIFT))
goto out_inode;
d_instantiate(dentry, inode);
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 9e9931e2bad..f2386442ade 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -672,11 +672,11 @@ out:
return ret;
}
-static struct super_block *
+static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+ const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
+ return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA, mnt);
}
static struct file_system_type inotify_fs_type = {
diff --git a/fs/ioprio.c b/fs/ioprio.c
index ca77008146c..7fa76ed53c1 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -24,15 +24,21 @@
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
+#include <linux/security.h>
static int set_task_ioprio(struct task_struct *task, int ioprio)
{
+ int err;
struct io_context *ioc;
if (task->uid != current->euid &&
task->uid != current->uid && !capable(CAP_SYS_NICE))
return -EPERM;
+ err = security_task_setioprio(task, ioprio);
+ if (err)
+ return err;
+
task_lock(task);
task->ioprio = ioprio;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 70adbb98bad..3f9c8ba1fa1 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -56,7 +56,7 @@ static void isofs_put_super(struct super_block *sb)
}
static void isofs_read_inode(struct inode *);
-static int isofs_statfs (struct super_block *, struct kstatfs *);
+static int isofs_statfs (struct dentry *, struct kstatfs *);
static kmem_cache_t *isofs_inode_cachep;
@@ -901,8 +901,10 @@ out_freesbi:
return -EINVAL;
}
-static int isofs_statfs (struct super_block *sb, struct kstatfs *buf)
+static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = ISOFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (ISOFS_SB(sb)->s_nzones
@@ -1399,10 +1401,11 @@ struct inode *isofs_iget(struct super_block *sb,
return inode;
}
-static struct super_block *isofs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int isofs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, isofs_fill_super,
+ mnt);
}
static struct file_system_type iso9660_fs_type = {
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 3f5102b069d..47678a26c13 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -24,29 +24,67 @@
#include <linux/slab.h>
/*
- * Unlink a buffer from a transaction.
+ * Unlink a buffer from a transaction checkpoint list.
*
* Called with j_list_lock held.
*/
-
-static inline void __buffer_unlink(struct journal_head *jh)
+static inline void __buffer_unlink_first(struct journal_head *jh)
{
- transaction_t *transaction;
-
- transaction = jh->b_cp_transaction;
- jh->b_cp_transaction = NULL;
+ transaction_t *transaction = jh->b_cp_transaction;
jh->b_cpnext->b_cpprev = jh->b_cpprev;
jh->b_cpprev->b_cpnext = jh->b_cpnext;
- if (transaction->t_checkpoint_list == jh)
+ if (transaction->t_checkpoint_list == jh) {
transaction->t_checkpoint_list = jh->b_cpnext;
- if (transaction->t_checkpoint_list == jh)
- transaction->t_checkpoint_list = NULL;
+ if (transaction->t_checkpoint_list == jh)
+ transaction->t_checkpoint_list = NULL;
+ }
+}
+
+/*
+ * Unlink a buffer from a transaction checkpoint(io) list.
+ *
+ * Called with j_list_lock held.
+ */
+static inline void __buffer_unlink(struct journal_head *jh)
+{
+ transaction_t *transaction = jh->b_cp_transaction;
+
+ __buffer_unlink_first(jh);
+ if (transaction->t_checkpoint_io_list == jh) {
+ transaction->t_checkpoint_io_list = jh->b_cpnext;
+ if (transaction->t_checkpoint_io_list == jh)
+ transaction->t_checkpoint_io_list = NULL;
+ }
+}
+
+/*
+ * Move a buffer from the checkpoint list to the checkpoint io list
+ *
+ * Called with j_list_lock held
+ */
+static inline void __buffer_relink_io(struct journal_head *jh)
+{
+ transaction_t *transaction = jh->b_cp_transaction;
+
+ __buffer_unlink_first(jh);
+
+ if (!transaction->t_checkpoint_io_list) {
+ jh->b_cpnext = jh->b_cpprev = jh;
+ } else {
+ jh->b_cpnext = transaction->t_checkpoint_io_list;
+ jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
+ jh->b_cpprev->b_cpnext = jh;
+ jh->b_cpnext->b_cpprev = jh;
+ }
+ transaction->t_checkpoint_io_list = jh;
}
/*
* Try to release a checkpointed buffer from its transaction.
- * Returns 1 if we released it.
+ * Returns 1 if we released it and 2 if we also released the
+ * whole transaction.
+ *
* Requires j_list_lock
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
*/
@@ -57,12 +95,11 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- __journal_remove_checkpoint(jh);
+ ret = __journal_remove_checkpoint(jh) + 1;
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
__brelse(bh);
- ret = 1;
} else {
jbd_unlock_bh_state(bh);
}
@@ -117,83 +154,54 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
}
/*
- * Clean up a transaction's checkpoint list.
- *
- * We wait for any pending IO to complete and make sure any clean
- * buffers are removed from the transaction.
- *
- * Return 1 if we performed any actions which might have destroyed the
- * checkpoint. (journal_remove_checkpoint() deletes the transaction when
- * the last checkpoint buffer is cleansed)
+ * Clean up transaction's list of buffers submitted for io.
+ * We wait for any pending IO to complete and remove any clean
+ * buffers. Note that we take the buffers in the opposite ordering
+ * from the one in which they were submitted for IO.
*
* Called with j_list_lock held.
*/
-static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
+static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
{
- struct journal_head *jh, *next_jh, *last_jh;
+ struct journal_head *jh;
struct buffer_head *bh;
- int ret = 0;
-
- assert_spin_locked(&journal->j_list_lock);
- jh = transaction->t_checkpoint_list;
- if (!jh)
- return 0;
-
- last_jh = jh->b_cpprev;
- next_jh = jh;
- do {
- jh = next_jh;
+ tid_t this_tid;
+ int released = 0;
+
+ this_tid = transaction->t_tid;
+restart:
+ /* Did somebody clean up the transaction in the meanwhile? */
+ if (journal->j_checkpoint_transactions != transaction ||
+ transaction->t_tid != this_tid)
+ return;
+ while (!released && transaction->t_checkpoint_io_list) {
+ jh = transaction->t_checkpoint_io_list;
bh = jh2bh(jh);
+ if (!jbd_trylock_bh_state(bh)) {
+ jbd_sync_bh(journal, bh);
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
if (buffer_locked(bh)) {
atomic_inc(&bh->b_count);
spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
- goto out_return_1;
+ spin_lock(&journal->j_list_lock);
+ goto restart;
}
-
/*
- * This is foul
+ * Now in whatever state the buffer currently is, we know that
+ * it has been written out and so we can drop it from the list
*/
- if (!jbd_trylock_bh_state(bh)) {
- jbd_sync_bh(journal, bh);
- goto out_return_1;
- }
-
- if (jh->b_transaction != NULL) {
- transaction_t *t = jh->b_transaction;
- tid_t tid = t->t_tid;
-
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- log_start_commit(journal, tid);
- log_wait_commit(journal, tid);
- goto out_return_1;
- }
-
- /*
- * AKPM: I think the buffer_jbddirty test is redundant - it
- * shouldn't have NULL b_transaction?
- */
- next_jh = jh->b_cpnext;
- if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
- BUFFER_TRACE(bh, "remove from checkpoint");
- __journal_remove_checkpoint(jh);
- jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
- __brelse(bh);
- ret = 1;
- } else {
- jbd_unlock_bh_state(bh);
- }
- } while (jh != last_jh);
-
- return ret;
-out_return_1:
- spin_lock(&journal->j_list_lock);
- return 1;
+ released = __journal_remove_checkpoint(jh);
+ jbd_unlock_bh_state(bh);
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+ }
}
#define NR_BATCH 64
@@ -203,9 +211,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
{
int i;
- spin_unlock(&journal->j_list_lock);
ll_rw_block(SWRITE, *batch_count, bhs);
- spin_lock(&journal->j_list_lock);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = bhs[i];
clear_buffer_jwrite(bh);
@@ -221,19 +227,43 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
* Return 1 if something happened which requires us to abort the current
* scan of the checkpoint list.
*
- * Called with j_list_lock held.
+ * Called with j_list_lock held and drops it if 1 is returned
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
*/
-static int __flush_buffer(journal_t *journal, struct journal_head *jh,
- struct buffer_head **bhs, int *batch_count,
- int *drop_count)
+static int __process_buffer(journal_t *journal, struct journal_head *jh,
+ struct buffer_head **bhs, int *batch_count)
{
struct buffer_head *bh = jh2bh(jh);
int ret = 0;
- if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) {
- J_ASSERT_JH(jh, jh->b_transaction == NULL);
+ if (buffer_locked(bh)) {
+ atomic_inc(&bh->b_count);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ wait_on_buffer(bh);
+ /* the journal_head may have gone by now */
+ BUFFER_TRACE(bh, "brelse");
+ __brelse(bh);
+ ret = 1;
+ } else if (jh->b_transaction != NULL) {
+ transaction_t *t = jh->b_transaction;
+ tid_t tid = t->t_tid;
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ log_start_commit(journal, tid);
+ log_wait_commit(journal, tid);
+ ret = 1;
+ } else if (!buffer_dirty(bh)) {
+ J_ASSERT_JH(jh, !buffer_jbddirty(bh));
+ BUFFER_TRACE(bh, "remove from checkpoint");
+ __journal_remove_checkpoint(jh);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ journal_remove_journal_head(bh);
+ __brelse(bh);
+ ret = 1;
+ } else {
/*
* Important: we are about to write the buffer, and
* possibly block, while still holding the journal lock.
@@ -246,45 +276,30 @@ static int __flush_buffer(journal_t *journal, struct journal_head *jh,
J_ASSERT_BH(bh, !buffer_jwrite(bh));
set_buffer_jwrite(bh);
bhs[*batch_count] = bh;
+ __buffer_relink_io(jh);
jbd_unlock_bh_state(bh);
(*batch_count)++;
if (*batch_count == NR_BATCH) {
+ spin_unlock(&journal->j_list_lock);
__flush_batch(journal, bhs, batch_count);
ret = 1;
}
- } else {
- int last_buffer = 0;
- if (jh->b_cpnext == jh) {
- /* We may be about to drop the transaction. Tell the
- * caller that the lists have changed.
- */
- last_buffer = 1;
- }
- if (__try_to_free_cp_buf(jh)) {
- (*drop_count)++;
- ret = last_buffer;
- }
}
return ret;
}
/*
- * Perform an actual checkpoint. We don't write out only enough to
- * satisfy the current blocked requests: rather we submit a reasonably
- * sized chunk of the outstanding data to disk at once for
- * efficiency. __log_wait_for_space() will retry if we didn't free enough.
+ * Perform an actual checkpoint. We take the first transaction on the
+ * list of transactions to be checkpointed and send all its buffers
+ * to disk. We submit larger chunks of data at once.
*
- * However, we _do_ take into account the amount requested so that once
- * the IO has been queued, we can return as soon as enough of it has
- * completed to disk.
- *
* The journal should be locked before calling this function.
*/
int log_do_checkpoint(journal_t *journal)
{
+ transaction_t *transaction;
+ tid_t this_tid;
int result;
- int batch_count = 0;
- struct buffer_head *bhs[NR_BATCH];
jbd_debug(1, "Start checkpoint\n");
@@ -299,79 +314,68 @@ int log_do_checkpoint(journal_t *journal)
return result;
/*
- * OK, we need to start writing disk blocks. Try to free up a
- * quarter of the log in a single checkpoint if we can.
+ * OK, we need to start writing disk blocks. Take one transaction
+ * and write it.
*/
+ spin_lock(&journal->j_list_lock);
+ if (!journal->j_checkpoint_transactions)
+ goto out;
+ transaction = journal->j_checkpoint_transactions;
+ this_tid = transaction->t_tid;
+restart:
/*
- * AKPM: check this code. I had a feeling a while back that it
- * degenerates into a busy loop at unmount time.
+ * If someone cleaned up this transaction while we slept, we're
+ * done (maybe it's a new transaction, but it fell at the same
+ * address).
*/
- spin_lock(&journal->j_list_lock);
- while (journal->j_checkpoint_transactions) {
- transaction_t *transaction;
- struct journal_head *jh, *last_jh, *next_jh;
- int drop_count = 0;
- int cleanup_ret, retry = 0;
- tid_t this_tid;
-
- transaction = journal->j_checkpoint_transactions;
- this_tid = transaction->t_tid;
- jh = transaction->t_checkpoint_list;
- last_jh = jh->b_cpprev;
- next_jh = jh;
- do {
+ if (journal->j_checkpoint_transactions == transaction &&
+ transaction->t_tid == this_tid) {
+ int batch_count = 0;
+ struct buffer_head *bhs[NR_BATCH];
+ struct journal_head *jh;
+ int retry = 0;
+
+ while (!retry && transaction->t_checkpoint_list) {
struct buffer_head *bh;
- jh = next_jh;
- next_jh = jh->b_cpnext;
+ jh = transaction->t_checkpoint_list;
bh = jh2bh(jh);
if (!jbd_trylock_bh_state(bh)) {
jbd_sync_bh(journal, bh);
- spin_lock(&journal->j_list_lock);
retry = 1;
break;
}
- retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count);
- if (cond_resched_lock(&journal->j_list_lock)) {
+ retry = __process_buffer(journal, jh, bhs,&batch_count);
+ if (!retry && lock_need_resched(&journal->j_list_lock)){
+ spin_unlock(&journal->j_list_lock);
retry = 1;
break;
}
- } while (jh != last_jh && !retry);
+ }
if (batch_count) {
+ if (!retry) {
+ spin_unlock(&journal->j_list_lock);
+ retry = 1;
+ }
__flush_batch(journal, bhs, &batch_count);
- retry = 1;
}
+ if (retry) {
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
/*
- * If someone cleaned up this transaction while we slept, we're
- * done
- */
- if (journal->j_checkpoint_transactions != transaction)
- break;
- if (retry)
- continue;
- /*
- * Maybe it's a new transaction, but it fell at the same
- * address
- */
- if (transaction->t_tid != this_tid)
- continue;
- /*
- * We have walked the whole transaction list without
- * finding anything to write to disk. We had better be
- * able to make some progress or we are in trouble.
+ * Now we have cleaned up the first transaction's checkpoint
+ * list. Let's clean up the second one
*/
- cleanup_ret = __cleanup_transaction(journal, transaction);
- J_ASSERT(drop_count != 0 || cleanup_ret != 0);
- if (journal->j_checkpoint_transactions != transaction)
- break;
+ __wait_cp_io(journal, transaction);
}
+out:
spin_unlock(&journal->j_list_lock);
result = cleanup_journal_tail(journal);
if (result < 0)
return result;
-
return 0;
}
@@ -456,52 +460,98 @@ int cleanup_journal_tail(journal_t *journal)
/* Checkpoint list management */
/*
+ * journal_clean_one_cp_list
+ *
+ * Find all the written-back checkpoint buffers in the given list and release them.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+{
+ struct journal_head *last_jh;
+ struct journal_head *next_jh = jh;
+ int ret, freed = 0;
+
+ *released = 0;
+ if (!jh)
+ return 0;
+
+ last_jh = jh->b_cpprev;
+ do {
+ jh = next_jh;
+ next_jh = jh->b_cpnext;
+ /* Use trylock because of the ranking */
+ if (jbd_trylock_bh_state(jh2bh(jh))) {
+ ret = __try_to_free_cp_buf(jh);
+ if (ret) {
+ freed++;
+ if (ret == 2) {
+ *released = 1;
+ return freed;
+ }
+ }
+ }
+ /*
+ * This function only frees up some memory
+ * if possible so we don't have an obligation
+ * to finish processing. Bail out if preemption
+ * requested:
+ */
+ if (need_resched())
+ return freed;
+ } while (jh != last_jh);
+
+ return freed;
+}
+
+/*
* journal_clean_checkpoint_list
*
* Find all the written-back checkpoint buffers in the journal and release them.
*
* Called with the journal locked.
* Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
*/
int __journal_clean_checkpoint_list(journal_t *journal)
{
transaction_t *transaction, *last_transaction, *next_transaction;
int ret = 0;
+ int released;
transaction = journal->j_checkpoint_transactions;
- if (transaction == 0)
+ if (!transaction)
goto out;
last_transaction = transaction->t_cpprev;
next_transaction = transaction;
do {
- struct journal_head *jh;
-
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
- jh = transaction->t_checkpoint_list;
- if (jh) {
- struct journal_head *last_jh = jh->b_cpprev;
- struct journal_head *next_jh = jh;
-
- do {
- jh = next_jh;
- next_jh = jh->b_cpnext;
- /* Use trylock because of the ranknig */
- if (jbd_trylock_bh_state(jh2bh(jh)))
- ret += __try_to_free_cp_buf(jh);
- /*
- * This function only frees up some memory
- * if possible so we dont have an obligation
- * to finish processing. Bail out if preemption
- * requested:
- */
- if (need_resched())
- goto out;
- } while (jh != last_jh);
- }
+ ret += journal_clean_one_cp_list(transaction->
+ t_checkpoint_list, &released);
+ /*
+ * This function only frees up some memory if possible so we
+ * don't have an obligation to finish processing. Bail out if
+ * preemption requested:
+ */
+ if (need_resched())
+ goto out;
+ if (released)
+ continue;
+ /*
+ * It is essential that we are as careful as in the case of
+ * t_checkpoint_list with removing the buffer from the list as
+ * we can possibly see not yet submitted buffers on io_list
+ */
+ ret += journal_clean_one_cp_list(transaction->
+ t_checkpoint_io_list, &released);
+ if (need_resched())
+ goto out;
} while (transaction != last_transaction);
out:
return ret;
@@ -516,18 +566,22 @@ out:
* buffer updates committed in that transaction have safely been stored
* elsewhere on disk. To achieve this, all of the buffers in a
* transaction need to be maintained on the transaction's checkpoint
- * list until they have been rewritten, at which point this function is
+ * lists until they have been rewritten, at which point this function is
* called to remove the buffer from the existing transaction's
- * checkpoint list.
+ * checkpoint lists.
+ *
+ * The function returns 1 if it frees the transaction, 0 otherwise.
*
* This function is called with the journal locked.
* This function is called with j_list_lock held.
+ * This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
-void __journal_remove_checkpoint(struct journal_head *jh)
+int __journal_remove_checkpoint(struct journal_head *jh)
{
transaction_t *transaction;
journal_t *journal;
+ int ret = 0;
JBUFFER_TRACE(jh, "entry");
@@ -538,8 +592,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
journal = transaction->t_journal;
__buffer_unlink(jh);
+ jh->b_cp_transaction = NULL;
- if (transaction->t_checkpoint_list != NULL)
+ if (transaction->t_checkpoint_list != NULL ||
+ transaction->t_checkpoint_io_list != NULL)
goto out;
JBUFFER_TRACE(jh, "transaction has no more buffers");
@@ -565,8 +621,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
/* Just in case anybody was waiting for more transactions to be
checkpointed... */
wake_up(&journal->j_wait_logspace);
+ ret = 1;
out:
JBUFFER_TRACE(jh, "exit");
+ return ret;
}
/*
@@ -628,6 +686,7 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
J_ASSERT(transaction->t_shadow_list == NULL);
J_ASSERT(transaction->t_log_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
+ J_ASSERT(transaction->t_checkpoint_io_list == NULL);
J_ASSERT(transaction->t_updates == 0);
J_ASSERT(journal->j_committing_transaction != transaction);
J_ASSERT(journal->j_running_transaction != transaction);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 002ad2bbc76..0971814c38b 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -790,11 +790,22 @@ restart_loop:
jbd_unlock_bh_state(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
- J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
- __journal_unfile_buffer(jh);
- jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh); /* needs a brelse */
- release_buffer_page(bh);
+ /* The buffer on BJ_Forget list and not jbddirty means
+ * it has been freed by this transaction and hence it
+ * could not have been reallocated until this
+ * transaction has committed. *BUT* it could be
+ * reallocated once we have written all the data to
+ * disk and before we process the buffer on BJ_Forget
+ * list. */
+ JBUFFER_TRACE(jh, "refile or unfile freed buffer");
+ __journal_refile_buffer(jh);
+ if (!jh->b_transaction) {
+ jbd_unlock_bh_state(bh);
+ /* needs a brelse */
+ journal_remove_journal_head(bh);
+ release_buffer_page(bh);
+ } else
+ jbd_unlock_bh_state(bh);
}
cond_resched_lock(&journal->j_list_lock);
}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index c609f5034fc..508b2ea91f4 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -227,7 +227,8 @@ repeat_locked:
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
out:
- kfree(new_transaction);
+ if (unlikely(new_transaction)) /* It's usually NULL */
+ kfree(new_transaction);
return ret;
}
@@ -724,7 +725,8 @@ done:
journal_cancel_revoke(handle, jh);
out:
- kfree(frozen_buffer);
+ if (unlikely(frozen_buffer)) /* It's usually NULL */
+ kfree(frozen_buffer);
JBUFFER_TRACE(jh, "exit");
return error;
@@ -903,7 +905,8 @@ repeat:
jbd_unlock_bh_state(bh);
out:
journal_put_journal_head(jh);
- kfree(committed_data);
+ if (unlikely(committed_data))
+ kfree(committed_data);
return err;
}
@@ -2038,7 +2041,8 @@ void __journal_refile_buffer(struct journal_head *jh)
__journal_temp_unlink_buffer(jh);
jh->b_transaction = jh->b_next_transaction;
jh->b_next_transaction = NULL;
- __journal_file_buffer(jh, jh->b_transaction, BJ_Metadata);
+ __journal_file_buffer(jh, jh->b_transaction,
+ was_dirty ? BJ_Metadata : BJ_Reserved);
J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
if (was_dirty)
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 020cc097c53..9e46ea6da75 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -377,9 +377,9 @@ jffs_new_inode(const struct inode * dir, struct jffs_raw_inode *raw_inode,
/* Get statistics of the file system. */
static int
-jffs_statfs(struct super_block *sb, struct kstatfs *buf)
+jffs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct jffs_control *c = (struct jffs_control *) sb->s_fs_info;
+ struct jffs_control *c = (struct jffs_control *) dentry->d_sb->s_fs_info;
struct jffs_fmcontrol *fmc;
lock_kernel();
@@ -1785,10 +1785,11 @@ static struct super_operations jffs_ops =
.remount_fs = jffs_remount,
};
-static struct super_block *jffs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int jffs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, jffs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, jffs_fill_super,
+ mnt);
}
static struct file_system_type jffs_fs_type = {
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 7b6c24b14f8..2900ec3ec3a 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -192,9 +192,9 @@ int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
return rc;
}
-int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
+int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
unsigned long avail;
buf->f_type = JFFS2_SUPER_MAGIC;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index cd4021bcb94..6b522356540 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -175,7 +175,7 @@ void jffs2_clear_inode (struct inode *);
void jffs2_dirty_inode(struct inode *inode);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
struct jffs2_raw_inode *ri);
-int jffs2_statfs (struct super_block *, struct kstatfs *);
+int jffs2_statfs (struct dentry *, struct kstatfs *);
void jffs2_write_super (struct super_block *);
int jffs2_remount_fs (struct super_block *, int *, char *);
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 9d0521451f5..2378a662c25 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -111,9 +111,10 @@ static int jffs2_sb_set(struct super_block *sb, void *data)
return 0;
}
-static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data, struct mtd_info *mtd)
+static int jffs2_get_sb_mtd(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct mtd_info *mtd,
+ struct vfsmount *mnt)
{
struct super_block *sb;
struct jffs2_sb_info *c;
@@ -121,19 +122,20 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
memset(c, 0, sizeof(*c));
c->mtd = mtd;
sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
if (IS_ERR(sb))
- goto out_put;
+ goto out_error;
if (sb->s_root) {
/* New mountpoint for JFFS2 which is already mounted */
D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name));
+ ret = simple_set_mnt(mnt, sb);
goto out_put;
}
@@ -161,44 +163,47 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
/* Failure case... */
up_write(&sb->s_umount);
deactivate_super(sb);
- return ERR_PTR(ret);
+ return ret;
}
sb->s_flags |= MS_ACTIVE;
- return sb;
+ return simple_set_mnt(mnt, sb);
+out_error:
+ ret = PTR_ERR(sb);
out_put:
kfree(c);
put_mtd_device(mtd);
- return sb;
+ return ret;
}
-static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data, int mtdnr)
+static int jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, int mtdnr,
+ struct vfsmount *mnt)
{
struct mtd_info *mtd;
mtd = get_mtd_device(NULL, mtdnr);
if (!mtd) {
D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr));
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+ return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt);
}
-static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int jffs2_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
int err;
struct nameidata nd;
int mtdnr;
if (!dev_name)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name));
@@ -220,7 +225,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
mtd = get_mtd_device(NULL, mtdnr);
if (mtd) {
if (!strcmp(mtd->name, dev_name+4))
- return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+ return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt);
put_mtd_device(mtd);
}
}
@@ -233,7 +238,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
if (!*endptr) {
/* It was a valid number */
D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr));
- return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt);
}
}
}
@@ -247,7 +252,7 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
err, nd.dentry->d_inode));
if (err)
- return ERR_PTR(err);
+ return err;
err = -EINVAL;
@@ -269,11 +274,11 @@ static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
mtdnr = iminor(nd.dentry->d_inode);
path_release(&nd);
- return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt);
out:
path_release(&nd);
- return ERR_PTR(err);
+ return err;
}
static void jffs2_put_super (struct super_block *sb)
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 2b220dd6b4e..7f6e8803970 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -632,10 +632,9 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
}
SetPageUptodate(page);
} else {
- page = read_cache_page(mapping, page_index,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, page_index, NULL);
if (IS_ERR(page) || !PageUptodate(page)) {
- jfs_err("read_cache_page failed!");
+ jfs_err("read_mapping_page failed!");
return NULL;
}
lock_page(page);
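read_mapping_page() is the helper these call sites switch to; it reads a page through the mapping's own ->readpage, exactly as the open-coded read_cache_page() calls did. Roughly what the helper amounts to (a sketch of the pagemap.h definition this series relies on):

/* read page 'index' of 'mapping' via its ->readpage method */
static inline struct page *read_mapping_page(struct address_space *mapping,
                                             unsigned long index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;

        return read_cache_page(mapping, index, filler, data);
}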
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index db6f41d6dd6..73d2aba084c 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -139,9 +139,9 @@ static void jfs_destroy_inode(struct inode *inode)
kmem_cache_free(jfs_inode_cachep, ji);
}
-static int jfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct jfs_sb_info *sbi = JFS_SBI(sb);
+ struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
s64 maxinodes;
struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;
@@ -565,10 +565,11 @@ static void jfs_unlockfs(struct super_block *sb)
}
}
-static struct super_block *jfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int jfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super,
+ mnt);
}
static int jfs_sync_fs(struct super_block *sb, int wait)
diff --git a/fs/libfs.c b/fs/libfs.c
index 7145ba7a48d..1b115638178 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -20,9 +20,9 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-int simple_statfs(struct super_block *sb, struct kstatfs *buf)
+int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- buf->f_type = sb->s_magic;
+ buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_namelen = NAME_MAX;
return 0;
@@ -196,9 +196,9 @@ struct inode_operations simple_dir_inode_operations = {
* Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
* will never be mountable)
*/
-struct super_block *
-get_sb_pseudo(struct file_system_type *fs_type, char *name,
- struct super_operations *ops, unsigned long magic)
+int get_sb_pseudo(struct file_system_type *fs_type, char *name,
+ struct super_operations *ops, unsigned long magic,
+ struct vfsmount *mnt)
{
struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
static struct super_operations default_ops = {.statfs = simple_statfs};
@@ -207,7 +207,7 @@ get_sb_pseudo(struct file_system_type *fs_type, char *name,
struct qstr d_name = {.name = name, .len = strlen(name)};
if (IS_ERR(s))
- return s;
+ return PTR_ERR(s);
s->s_flags = MS_NOUSER;
s->s_maxbytes = ~0ULL;
@@ -232,12 +232,12 @@ get_sb_pseudo(struct file_system_type *fs_type, char *name,
d_instantiate(dentry, root);
s->s_root = dentry;
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
Enomem:
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
diff --git a/fs/locks.c b/fs/locks.c
index 69435c68c1e..1ad29c9b625 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -703,7 +703,7 @@ EXPORT_SYMBOL(posix_test_lock);
* from a broken NFS client. But broken NFS clients have a lot more to
* worry about than proper deadlock detection anyway... --okir
*/
-int posix_locks_deadlock(struct file_lock *caller_fl,
+static int posix_locks_deadlock(struct file_lock *caller_fl,
struct file_lock *block_fl)
{
struct list_head *tmp;
@@ -722,8 +722,6 @@ next_task:
return 0;
}
-EXPORT_SYMBOL(posix_locks_deadlock);
-
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
* at the head of the list, but that's secret knowledge known only to
* flock_lock_file and posix_lock_file.
@@ -794,7 +792,8 @@ out:
static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
struct file_lock *fl;
- struct file_lock *new_fl, *new_fl2;
+ struct file_lock *new_fl = NULL;
+ struct file_lock *new_fl2 = NULL;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
struct file_lock **before;
@@ -803,9 +802,15 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
+ *
+ * In some cases we can be sure that no new locks will be needed
*/
- new_fl = locks_alloc_lock();
- new_fl2 = locks_alloc_lock();
+ if (!(request->fl_flags & FL_ACCESS) &&
+ (request->fl_type != F_UNLCK ||
+ request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
+ new_fl = locks_alloc_lock();
+ new_fl2 = locks_alloc_lock();
+ }
lock_kernel();
if (request->fl_type != F_UNLCK) {
@@ -834,14 +839,7 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
if (request->fl_flags & FL_ACCESS)
goto out;
- error = -ENOLCK; /* "no luck" */
- if (!(new_fl && new_fl2))
- goto out;
-
/*
- * We've allocated the new locks in advance, so there are no
- * errors possible (and no blocking operations) from here on.
- *
* Find the first old lock with the same owner as the new lock.
*/
@@ -938,10 +936,25 @@ static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request
before = &fl->fl_next;
}
+ /*
+ * The above code only modifies existing locks in case of
+ * merging or replacing. If new lock(s) need to be inserted
+ * all modifications are done below this point, so it is still safe to
+ * bail out.
+ */
+ error = -ENOLCK; /* "no luck" */
+ if (right && left == right && !new_fl2)
+ goto out;
+
error = 0;
if (!added) {
if (request->fl_type == F_UNLCK)
goto out;
+
+ if (!new_fl) {
+ error = -ENOLCK;
+ goto out;
+ }
locks_copy_lock(new_fl, request);
locks_insert_lock(before, new_fl);
new_fl = NULL;
@@ -1881,19 +1894,18 @@ out:
*/
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
- struct file_lock lock, **before;
+ struct file_lock lock;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- before = &filp->f_dentry->d_inode->i_flock;
- if (*before == NULL)
+ if (!filp->f_dentry->d_inode->i_flock)
return;
lock.fl_type = F_UNLCK;
- lock.fl_flags = FL_POSIX;
+ lock.fl_flags = FL_POSIX | FL_CLOSE;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
lock.fl_owner = owner;
@@ -1902,25 +1914,11 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
lock.fl_ops = NULL;
lock.fl_lmops = NULL;
- if (filp->f_op && filp->f_op->lock != NULL) {
+ if (filp->f_op && filp->f_op->lock != NULL)
filp->f_op->lock(filp, F_SETLK, &lock);
- goto out;
- }
+ else
+ posix_lock_file(filp, &lock);
- /* Can't use posix_lock_file here; we need to remove it no matter
- * which pid we have.
- */
- lock_kernel();
- while (*before != NULL) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) {
- locks_delete_lock(before);
- continue;
- }
- before = &fl->fl_next;
- }
- unlock_kernel();
-out:
if (lock.fl_ops && lock.fl_ops->fl_release_private)
lock.fl_ops->fl_release_private(&lock);
}
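The lock structures are now pre-allocated only when the request could actually need them; a whole-file unlock, which is what close() and locks_remove_posix() generate, is exempt. From userspace that exempt case is simply this (hypothetical helper, shown for illustration):

#include <fcntl.h>

/*
 * Drop every POSIX lock this process holds on fd.  l_len == 0 means
 * "to end of file", i.e. fl_start == 0 and fl_end == OFFSET_MAX in the
 * kernel -- the case __posix_lock_file_conf() no longer allocates for.
 */
static int unlock_whole_file(int fd)
{
        struct flock fl = {
                .l_type   = F_UNLCK,
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,
        };

        return fcntl(fd, F_SETLK, &fl);
}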
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 69224d1fe04..2b0a389d198 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -60,8 +60,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 2dcccf1d1b7..a6fb509b734 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -19,7 +19,7 @@
static void minix_read_inode(struct inode * inode);
static int minix_write_inode(struct inode * inode, int wait);
-static int minix_statfs(struct super_block *sb, struct kstatfs *buf);
+static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);
static void minix_delete_inode(struct inode *inode)
@@ -296,11 +296,11 @@ out_bad_sb:
return -EINVAL;
}
-static int minix_statfs(struct super_block *sb, struct kstatfs *buf)
+static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct minix_sb_info *sbi = minix_sb(sb);
- buf->f_type = sb->s_magic;
- buf->f_bsize = sb->s_blocksize;
+ struct minix_sb_info *sbi = minix_sb(dentry->d_sb);
+ buf->f_type = dentry->d_sb->s_magic;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
buf->f_bfree = minix_count_free_blocks(sbi);
buf->f_bavail = buf->f_bfree;
@@ -559,10 +559,11 @@ void minix_truncate(struct inode * inode)
V2_minix_truncate(inode);
}
-static struct super_block *minix_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int minix_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, minix_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, minix_fill_super,
+ mnt);
}
static struct file_system_type minix_fs_type = {
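The ->statfs() side of the conversion is equally uniform: the method now receives a dentry and gets at the super block through dentry->d_sb. A minimal handler in the new style (the examplefs name and the NAME_MAX/blocksize choices are illustrative assumptions):

#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/limits.h>

static int examplefs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;

        buf->f_type    = sb->s_magic;
        buf->f_bsize   = sb->s_blocksize;
        buf->f_namelen = NAME_MAX;
        return 0;
}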
diff --git a/fs/mpage.c b/fs/mpage.c
index 9bf2eb30e6f..1e4598247d0 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -707,9 +707,9 @@ mpage_writepages(struct address_space *mapping,
struct pagevec pvec;
int nr_pages;
pgoff_t index;
- pgoff_t end = -1; /* Inclusive */
+ pgoff_t end; /* Inclusive */
int scanned = 0;
- int is_range = 0;
+ int range_whole = 0;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
@@ -721,16 +721,14 @@ mpage_writepages(struct address_space *mapping,
writepage = mapping->a_ops->writepage;
pagevec_init(&pvec, 0);
- if (wbc->sync_mode == WB_SYNC_NONE) {
+ if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
} else {
- index = 0; /* whole-file sweep */
- scanned = 1;
- }
- if (wbc->start || wbc->end) {
- index = wbc->start >> PAGE_CACHE_SHIFT;
- end = wbc->end >> PAGE_CACHE_SHIFT;
- is_range = 1;
+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
scanned = 1;
}
retry:
@@ -759,7 +757,7 @@ retry:
continue;
}
- if (unlikely(is_range) && page->index > end) {
+ if (!wbc->range_cyclic && page->index > end) {
done = 1;
unlock_page(page);
continue;
@@ -810,7 +808,7 @@ retry:
index = 0;
goto retry;
}
- if (!is_range)
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
if (bio)
mpage_bio_submit(WRITE, bio);
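The old wbc->start/end pair and the WB_SYNC_NONE heuristic give way to explicit range_start/range_end/range_cyclic fields. A caller asking for a full, non-cyclic sweep of one mapping would set them up roughly like this (sketch, assuming the new field names from this series and the existing do_writepages() entry point):

#include <linux/writeback.h>
#include <linux/kernel.h>

static int examplefs_write_whole_mapping(struct address_space *mapping)
{
        struct writeback_control wbc = {
                .sync_mode    = WB_SYNC_ALL,
                .nr_to_write  = LONG_MAX,
                .range_start  = 0,
                .range_end    = LLONG_MAX,      /* whole file, not cyclic */
                .range_cyclic = 0,
        };

        return do_writepages(mapping, &wbc);
}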
diff --git a/fs/msdos/namei.c b/fs/msdos/namei.c
index 5b76ccd19e3..9e44158a754 100644
--- a/fs/msdos/namei.c
+++ b/fs/msdos/namei.c
@@ -661,11 +661,12 @@ static int msdos_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *msdos_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int msdos_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, msdos_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, msdos_fill_super,
+ mnt);
}
static struct file_system_type msdos_fs_type = {
diff --git a/fs/namei.c b/fs/namei.c
index 184fe4acf82..bb4a3e40e43 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2577,8 +2577,7 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
struct page * page;
struct address_space *mapping = dentry->d_inode->i_mapping;
- page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
- NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto sync_fail;
wait_on_page_locked(page);
diff --git a/fs/namespace.c b/fs/namespace.c
index bf478addb85..c13072a5f1e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -86,6 +86,15 @@ struct vfsmount *alloc_vfsmnt(const char *name)
return mnt;
}
+int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
+{
+ mnt->mnt_sb = sb;
+ mnt->mnt_root = dget(sb->s_root);
+ return 0;
+}
+
+EXPORT_SYMBOL(simple_set_mnt);
+
void free_vfsmnt(struct vfsmount *mnt)
{
kfree(mnt->mnt_devname);
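simple_set_mnt() is the one genuinely new helper: a converted ->get_sb() attaches its super block to the mount itself and returns 0 instead of handing the super block back. The tail of such an implementation reduces to the following sketch (error handling omitted, names hypothetical):

#include <linux/fs.h>
#include <linux/mount.h>

/* sketch: hand a freshly filled super block back through the vfsmount */
static int examplefs_finish_get_sb(struct vfsmount *mnt, struct super_block *s)
{
        s->s_flags |= MS_ACTIVE;
        /* sets mnt->mnt_sb and takes a reference on s->s_root */
        return simple_set_mnt(mnt, s);
}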
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index a1f3e972c6e..90d2ea28f33 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -39,7 +39,7 @@
static void ncp_delete_inode(struct inode *);
static void ncp_put_super(struct super_block *);
-static int ncp_statfs(struct super_block *, struct kstatfs *);
+static int ncp_statfs(struct dentry *, struct kstatfs *);
static kmem_cache_t * ncp_inode_cachep;
@@ -724,13 +724,14 @@ static void ncp_put_super(struct super_block *sb)
kfree(server);
}
-static int ncp_statfs(struct super_block *sb, struct kstatfs *buf)
+static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct dentry* d;
struct inode* i;
struct ncp_inode_info* ni;
struct ncp_server* s;
struct ncp_volume_info vi;
+ struct super_block *sb = dentry->d_sb;
int err;
__u8 dh;
@@ -957,10 +958,10 @@ out:
return result;
}
-static struct super_block *ncp_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ncp_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, ncp_fill_super);
+ return get_sb_nodev(fs_type, flags, data, ncp_fill_super, mnt);
}
static struct file_system_type ncp_fs_type = {
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index fade02c15e6..fa05c027ea1 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -43,7 +43,7 @@ static int nfs_file_mmap(struct file *, struct vm_area_struct *);
static ssize_t nfs_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
static ssize_t nfs_file_read(struct kiocb *, char __user *, size_t, loff_t);
static ssize_t nfs_file_write(struct kiocb *, const char __user *, size_t, loff_t);
-static int nfs_file_flush(struct file *);
+static int nfs_file_flush(struct file *, fl_owner_t id);
static int nfs_fsync(struct file *, struct dentry *dentry, int datasync);
static int nfs_check_flags(int flags);
static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
@@ -188,7 +188,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
*
*/
static int
-nfs_file_flush(struct file *file)
+nfs_file_flush(struct file *file, fl_owner_t id)
{
struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
struct inode *inode = file->f_dentry->d_inode;
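->flush() grows an fl_owner_t argument, the owner filp_close() is acting for, so a filesystem can tell which opener is going away. A converted handler might look like this (sketch; examplefs name hypothetical, write-back policy illustrative only):

#include <linux/fs.h>

static int examplefs_flush(struct file *file, fl_owner_t id)
{
        /* 'id' names the lock owner being closed; flush its dirty data */
        return filemap_write_and_wait(file->f_mapping);
}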
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d0b991a9232..937fbfc381b 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -65,7 +65,7 @@ static int nfs_write_inode(struct inode *,int);
static void nfs_delete_inode(struct inode *);
static void nfs_clear_inode(struct inode *);
static void nfs_umount_begin(struct super_block *);
-static int nfs_statfs(struct super_block *, struct kstatfs *);
+static int nfs_statfs(struct dentry *, struct kstatfs *);
static int nfs_show_options(struct seq_file *, struct vfsmount *);
static int nfs_show_stats(struct seq_file *, struct vfsmount *);
static void nfs_zap_acl_cache(struct inode *);
@@ -534,8 +534,9 @@ nfs_fill_super(struct super_block *sb, struct nfs_mount_data *data, int silent)
}
static int
-nfs_statfs(struct super_block *sb, struct kstatfs *buf)
+nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct nfs_server *server = NFS_SB(sb);
unsigned char blockbits;
unsigned long blockres;
@@ -1690,8 +1691,8 @@ static int nfs_compare_super(struct super_block *sb, void *data)
return !nfs_compare_fh(&old->fh, &server->fh);
}
-static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+static int nfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
int error;
struct nfs_server *server = NULL;
@@ -1699,14 +1700,14 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
struct nfs_fh *root;
struct nfs_mount_data *data = raw_data;
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
if (data == NULL) {
dprintk("%s: missing data argument\n", __FUNCTION__);
- goto out_err;
+ goto out_err_noserver;
}
if (data->version <= 0 || data->version > NFS_MOUNT_VERSION) {
dprintk("%s: bad mount version\n", __FUNCTION__);
- goto out_err;
+ goto out_err_noserver;
}
switch (data->version) {
case 1:
@@ -1718,7 +1719,7 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
dprintk("%s: mount structure version %d does not support NFSv3\n",
__FUNCTION__,
data->version);
- goto out_err;
+ goto out_err_noserver;
}
data->root.size = NFS2_FHSIZE;
memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
@@ -1727,24 +1728,24 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
dprintk("%s: mount structure version %d does not support strong security\n",
__FUNCTION__,
data->version);
- goto out_err;
+ goto out_err_noserver;
}
case 5:
memset(data->context, 0, sizeof(data->context));
}
#ifndef CONFIG_NFS_V3
/* If NFSv3 is not compiled in, return -EPROTONOSUPPORT */
- s = ERR_PTR(-EPROTONOSUPPORT);
+ error = -EPROTONOSUPPORT;
if (data->flags & NFS_MOUNT_VER3) {
dprintk("%s: NFSv3 not compiled into kernel\n", __FUNCTION__);
- goto out_err;
+ goto out_err_noserver;
}
#endif /* CONFIG_NFS_V3 */
- s = ERR_PTR(-ENOMEM);
+ error = -ENOMEM;
server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL);
if (!server)
- goto out_err;
+ goto out_err_noserver;
/* Zero out the NFS state stuff */
init_nfsv4_state(server);
server->client = server->client_sys = server->client_acl = ERR_PTR(-EINVAL);
@@ -1754,7 +1755,7 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
root->size = data->root.size;
else
root->size = NFS2_FHSIZE;
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
if (root->size > sizeof(root->data)) {
dprintk("%s: invalid root filehandle\n", __FUNCTION__);
goto out_err;
@@ -1770,15 +1771,20 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
}
/* Fire up rpciod if not yet running */
- s = ERR_PTR(rpciod_up());
- if (IS_ERR(s)) {
- dprintk("%s: couldn't start rpciod! Error = %ld\n",
- __FUNCTION__, PTR_ERR(s));
+ error = rpciod_up();
+ if (error < 0) {
+ dprintk("%s: couldn't start rpciod! Error = %d\n",
+ __FUNCTION__, error);
goto out_err;
}
s = sget(fs_type, nfs_compare_super, nfs_set_super, server);
- if (IS_ERR(s) || s->s_root)
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
+ goto out_err_rpciod;
+ }
+
+ if (s->s_root)
goto out_rpciod_down;
s->s_flags = flags;
@@ -1787,15 +1793,22 @@ static struct super_block *nfs_get_sb(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
+
out_rpciod_down:
rpciod_down();
+ kfree(server);
+ return simple_set_mnt(mnt, s);
+
+out_err_rpciod:
+ rpciod_down();
out_err:
kfree(server);
- return s;
+out_err_noserver:
+ return error;
}
static void nfs_kill_super(struct super_block *s)
@@ -2032,8 +2045,8 @@ nfs_copy_user_string(char *dst, struct nfs_string *src, int maxlen)
return dst;
}
-static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+static int nfs4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
int error;
struct nfs_server *server;
@@ -2043,16 +2056,16 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
if (data == NULL) {
dprintk("%s: missing data argument\n", __FUNCTION__);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (data->version <= 0 || data->version > NFS4_MOUNT_VERSION) {
dprintk("%s: bad mount version\n", __FUNCTION__);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL);
if (!server)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
/* Zero out the NFS state stuff */
init_nfsv4_state(server);
server->client = server->client_sys = server->client_acl = ERR_PTR(-EINVAL);
@@ -2074,33 +2087,41 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
/* We now require that the mount process passes the remote address */
if (data->host_addrlen != sizeof(server->addr)) {
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
goto out_free;
}
if (copy_from_user(&server->addr, data->host_addr, sizeof(server->addr))) {
- s = ERR_PTR(-EFAULT);
+ error = -EFAULT;
goto out_free;
}
if (server->addr.sin_family != AF_INET ||
server->addr.sin_addr.s_addr == INADDR_ANY) {
dprintk("%s: mount program didn't pass remote IP address!\n",
__FUNCTION__);
- s = ERR_PTR(-EINVAL);
+ error = -EINVAL;
goto out_free;
}
/* Fire up rpciod if not yet running */
- s = ERR_PTR(rpciod_up());
- if (IS_ERR(s)) {
- dprintk("%s: couldn't start rpciod! Error = %ld\n",
- __FUNCTION__, PTR_ERR(s));
+ error = rpciod_up();
+ if (error < 0) {
+ dprintk("%s: couldn't start rpciod! Error = %d\n",
+ __FUNCTION__, error);
goto out_free;
}
s = sget(fs_type, nfs4_compare_super, nfs_set_super, server);
-
- if (IS_ERR(s) || s->s_root)
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
goto out_free;
+ }
+
+ if (s->s_root) {
+ kfree(server->mnt_path);
+ kfree(server->hostname);
+ kfree(server);
+ return simple_set_mnt(mnt, s);
+ }
s->s_flags = flags;
@@ -2108,17 +2129,17 @@ static struct super_block *nfs4_get_sb(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
out_err:
- s = (struct super_block *)p;
+ error = PTR_ERR(p);
out_free:
kfree(server->mnt_path);
kfree(server->hostname);
kfree(server);
- return s;
+ return error;
}
static void nfs4_kill_super(struct super_block *sb)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index de3998f15f1..5446a0861d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1310,7 +1310,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
- status = vfs_statfs(dentry->d_inode->i_sb, &statfs);
+ status = vfs_statfs(dentry, &statfs);
if (status)
goto out_nfserr;
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3ef017b3b5b..a1810e6a93e 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -494,10 +494,10 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
return simple_fill_super(sb, 0x6e667364, nfsd_files);
}
-static struct super_block *nfsd_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int nfsd_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, nfsd_fill_super);
+ return get_sb_single(fs_type, flags, data, nfsd_fill_super, mnt);
}
static struct file_system_type nfsd_fs_type = {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 1d65f13f458..245eaa1fb59 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1737,7 +1737,7 @@ int
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
{
int err = fh_verify(rqstp, fhp, 0, MAY_NOP);
- if (!err && vfs_statfs(fhp->fh_dentry->d_inode->i_sb,stat))
+ if (!err && vfs_statfs(fhp->fh_dentry,stat))
err = nfserr_io;
return err;
}
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 3b74e66ca2f..325ce261a10 100644
--- a/fs/ntfs/aops.h
+++ b/fs/ntfs/aops.h
@@ -86,8 +86,7 @@ static inline void ntfs_unmap_page(struct page *page)
static inline struct page *ntfs_map_page(struct address_space *mapping,
unsigned long index)
{
- struct page *page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, index, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index 1663f5c3c6a..6708e1d68a9 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -2529,8 +2529,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
end >>= PAGE_CACHE_SHIFT;
/* If there is a first partial page, need to do it the slow way. */
if (start_ofs) {
- page = read_cache_page(mapping, idx,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read first partial "
"page (sync error, index 0x%lx).", idx);
@@ -2600,8 +2599,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
}
/* If there is a last partial page, need to do it the slow way. */
if (end_ofs) {
- page = read_cache_page(mapping, idx,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, idx, NULL);
if (IS_ERR(page)) {
ntfs_error(vol->sb, "Failed to read last partial page "
"(sync error, index 0x%lx).", idx);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 36e1e136bb0..88292f9e4b9 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -231,8 +231,7 @@ do_non_resident_extend:
* Read the page. If the page is not present, this will zero
* the uninitialized regions for us.
*/
- page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, index, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto init_err_out;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 27833f6df49..0e14acea3f8 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2601,10 +2601,10 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
/**
* ntfs_statfs - return information about mounted NTFS volume
- * @sb: super block of mounted volume
+ * @dentry: dentry from mounted volume
* @sfs: statfs structure in which to return the information
*
- * Return information about the mounted NTFS volume @sb in the statfs structure
+ * Return information about the mounted NTFS volume @dentry in the statfs structure
* pointed to by @sfs (this is initialized with zeros before ntfs_statfs is
* called). We interpret the values to be correct of the moment in time at
* which we are called. Most values are variable otherwise and this isn't just
@@ -2617,8 +2617,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
*
* Return 0 on success or -errno on error.
*/
-static int ntfs_statfs(struct super_block *sb, struct kstatfs *sfs)
+static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs)
{
+ struct super_block *sb = dentry->d_sb;
s64 size;
ntfs_volume *vol = NTFS_SB(sb);
ntfs_inode *mft_ni = NTFS_I(vol->mft_ino);
@@ -3093,10 +3094,11 @@ struct kmem_cache *ntfs_index_ctx_cache;
/* Driver wide mutex. */
DEFINE_MUTEX(ntfs_lock);
-static struct super_block *ntfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ntfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ntfs_fill_super,
+ mnt);
}
static struct file_system_type ntfs_fs_type = {
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 7e88e24b347..7273d9fa6ba 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -574,10 +574,10 @@ static struct inode_operations dlmfs_file_inode_operations = {
.getattr = simple_getattr,
};
-static struct super_block *dlmfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int dlmfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super, mnt);
}
static struct file_system_type dlmfs_fs_type = {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 949b3dac30f..cdf73393f09 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -100,7 +100,7 @@ static int ocfs2_initialize_mem_caches(void);
static void ocfs2_free_mem_caches(void);
static void ocfs2_delete_osb(struct ocfs2_super *osb);
-static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf);
+static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ocfs2_sync_fs(struct super_block *sb, int wait);
@@ -672,12 +672,14 @@ read_super_error:
return status;
}
-static struct super_block *ocfs2_get_sb(struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *data)
+static int ocfs2_get_sb(struct file_system_type *fs_type,
+ int flags,
+ const char *dev_name,
+ void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super,
+ mnt);
}
static struct file_system_type ocfs2_fs_type = {
@@ -855,7 +857,7 @@ static void ocfs2_put_super(struct super_block *sb)
mlog_exit_void();
}
-static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
+static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct ocfs2_super *osb;
u32 numbits, freebits;
@@ -864,9 +866,9 @@ static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
struct buffer_head *bh = NULL;
struct inode *inode = NULL;
- mlog_entry("(%p, %p)\n", sb, buf);
+ mlog_entry("(%p, %p)\n", dentry->d_sb, buf);
- osb = OCFS2_SB(sb);
+ osb = OCFS2_SB(dentry->d_sb);
inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
@@ -889,7 +891,7 @@ static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
freebits = numbits - le32_to_cpu(bm_lock->id1.bitmap1.i_used);
buf->f_type = OCFS2_SUPER_MAGIC;
- buf->f_bsize = sb->s_blocksize;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
buf->f_namelen = OCFS2_MAX_FILENAME_LEN;
buf->f_blocks = ((sector_t) numbits) *
(osb->s_clustersize >> osb->sb->s_blocksize_bits);
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index f6986bd79e7..0c8a1294ec9 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -64,8 +64,7 @@ static char *ocfs2_page_getlink(struct dentry * dentry,
{
struct page * page;
struct address_space *mapping = dentry->d_inode->i_mapping;
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, 0, NULL);
if (IS_ERR(page))
goto sync_fail;
wait_on_page_locked(page);
diff --git a/fs/open.c b/fs/open.c
index 4f178acd4c0..5fb16e5267d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -31,18 +31,18 @@
#include <asm/unistd.h>
-int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
+int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int retval = -ENODEV;
- if (sb) {
+ if (dentry) {
retval = -ENOSYS;
- if (sb->s_op->statfs) {
+ if (dentry->d_sb->s_op->statfs) {
memset(buf, 0, sizeof(*buf));
- retval = security_sb_statfs(sb);
+ retval = security_sb_statfs(dentry);
if (retval)
return retval;
- retval = sb->s_op->statfs(sb, buf);
+ retval = dentry->d_sb->s_op->statfs(dentry, buf);
if (retval == 0 && buf->f_frsize == 0)
buf->f_frsize = buf->f_bsize;
}
@@ -52,12 +52,12 @@ int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
EXPORT_SYMBOL(vfs_statfs);
-static int vfs_statfs_native(struct super_block *sb, struct statfs *buf)
+static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(sb, &st);
+ retval = vfs_statfs(dentry, &st);
if (retval)
return retval;
@@ -95,12 +95,12 @@ static int vfs_statfs_native(struct super_block *sb, struct statfs *buf)
return 0;
}
-static int vfs_statfs64(struct super_block *sb, struct statfs64 *buf)
+static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(sb, &st);
+ retval = vfs_statfs(dentry, &st);
if (retval)
return retval;
@@ -130,7 +130,7 @@ asmlinkage long sys_statfs(const char __user * path, struct statfs __user * buf)
error = user_path_walk(path, &nd);
if (!error) {
struct statfs tmp;
- error = vfs_statfs_native(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_native(nd.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_release(&nd);
@@ -149,7 +149,7 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz, struct statfs64
error = user_path_walk(path, &nd);
if (!error) {
struct statfs64 tmp;
- error = vfs_statfs64(nd.dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs64(nd.dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_release(&nd);
@@ -168,7 +168,7 @@ asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user * buf)
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs_native(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs_native(file->f_dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
@@ -189,7 +189,7 @@ asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz, struct statfs64 __user
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs64(file->f_dentry->d_inode->i_sb, &tmp);
+ error = vfs_statfs64(file->f_dentry, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
@@ -1152,7 +1152,7 @@ int filp_close(struct file *filp, fl_owner_t id)
}
if (filp->f_op && filp->f_op->flush)
- retval = filp->f_op->flush(filp);
+ retval = filp->f_op->flush(filp, id);
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
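With vfs_statfs() keyed on a dentry, in-kernel callers simply pass whatever dentry they already hold, as the sys_*statfs paths above do with nd.dentry and file->f_dentry. A hedged sketch of such a caller (names hypothetical):

#include <linux/fs.h>
#include <linux/statfs.h>

/* sketch: report free bytes on the filesystem below an arbitrary dentry */
static int examplefs_free_bytes(struct dentry *dentry, u64 *out)
{
        struct kstatfs st;
        int err = vfs_statfs(dentry, &st);

        if (err)
                return err;
        *out = (u64)st.f_bfree * st.f_bsize;
        return 0;
}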
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 0f14276a2e5..464e2bce020 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -1054,10 +1054,10 @@ out_no_root:
return -ENOMEM;
}
-static struct super_block *openprom_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int openprom_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, openprom_fill_super);
+ return get_sb_single(fs_type, flags, data, openprom_fill_super, mnt);
}
static struct file_system_type openprom_fs_type = {
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 8851b81e7c5..2ef313a96b6 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -484,6 +484,10 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
sector_t from = state->parts[p].from;
if (!size)
continue;
+ if (from + size > get_capacity(disk)) {
+ printk(" %s: p%d exceeds device capacity\n",
+ disk->disk_name, p);
+ }
add_partition(disk, p, from, size);
#ifdef CONFIG_BLK_DEV_MD
if (state->parts[p].flags)
@@ -499,8 +503,8 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
struct address_space *mapping = bdev->bd_inode->i_mapping;
struct page *page;
- page = read_cache_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
- (filler_t *)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+ NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
if (!PageUptodate(page))
diff --git a/fs/pipe.c b/fs/pipe.c
index 5acd8954aaa..20352573e02 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -979,12 +979,11 @@ no_files:
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
-
-static struct super_block *
-pipefs_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int pipefs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
+ return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC, mnt);
}
static struct file_system_type pipe_fs_type = {
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c3fd3611112..9995356ce73 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -26,10 +26,10 @@ struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc
struct proc_dir_entry *proc_sys_root;
#endif
-static struct super_block *proc_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int proc_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, proc_fill_super);
+ return get_sb_single(fs_type, flags, data, proc_fill_super, mnt);
}
static struct file_system_type proc_fs_type = {
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 2ecd46f85e9..2f24c46f72a 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -128,7 +128,7 @@ static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_destroy_inode(struct inode *inode);
static void qnx4_read_inode(struct inode *);
static int qnx4_remount(struct super_block *sb, int *flags, char *data);
-static int qnx4_statfs(struct super_block *, struct kstatfs *);
+static int qnx4_statfs(struct dentry *, struct kstatfs *);
static struct super_operations qnx4_sops =
{
@@ -282,8 +282,10 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
return block;
}
-static int qnx4_statfs(struct super_block *sb, struct kstatfs *buf)
+static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
lock_kernel();
buf->f_type = sb->s_magic;
@@ -561,10 +563,11 @@ static void destroy_inodecache(void)
"qnx4_inode_cache: not all structures were freed\n");
}
-static struct super_block *qnx4_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int qnx4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, qnx4_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, qnx4_fill_super,
+ mnt);
}
static struct file_system_type qnx4_fs_type = {
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 14bd2246fb6..b9677335cc8 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -185,16 +185,17 @@ static int ramfs_fill_super(struct super_block * sb, void * data, int silent)
return 0;
}
-struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+int ramfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, ramfs_fill_super);
+ return get_sb_nodev(fs_type, flags, data, ramfs_fill_super, mnt);
}
-static struct super_block *rootfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int rootfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super);
+ return get_sb_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super,
+ mnt);
}
static struct file_system_type ramfs_fs_type = {
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index cae2abbc0c7..00f1321e920 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -60,7 +60,7 @@ static int is_any_reiserfs_magic_string(struct reiserfs_super_block *rs)
}
static int reiserfs_remount(struct super_block *s, int *flags, char *data);
-static int reiserfs_statfs(struct super_block *s, struct kstatfs *buf);
+static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int reiserfs_sync_fs(struct super_block *s, int wait)
{
@@ -1938,15 +1938,15 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
return errval;
}
-static int reiserfs_statfs(struct super_block *s, struct kstatfs *buf)
+static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);
+ struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(dentry->d_sb);
buf->f_namelen = (REISERFS_MAX_NAME(s->s_blocksize));
buf->f_bfree = sb_free_blocks(rs);
buf->f_bavail = buf->f_bfree;
buf->f_blocks = sb_block_count(rs) - sb_bmap_nr(rs) - 1;
- buf->f_bsize = s->s_blocksize;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
/* changed to accommodate gcc folks. */
buf->f_type = REISERFS_SUPER_MAGIC;
return 0;
@@ -2249,11 +2249,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
#endif
-static struct super_block *get_super_block(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int get_super_block(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, reiserfs_fill_super,
+ mnt);
}
static int __init init_reiserfs_fs(void)
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index ffb79c48c5b..39fedaa88a0 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -452,8 +452,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n)
/* We can deadlock if we try to free dentries,
and an unlink/rmdir has just occured - GFP_NOFS avoids this */
mapping_set_gfp_mask(mapping, GFP_NOFS);
- page = read_cache_page(mapping, n,
- (filler_t *) mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 9b9eda7b335..283fbc6b8ee 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -179,12 +179,12 @@ outnobh:
/* That's simple too. */
static int
-romfs_statfs(struct super_block *sb, struct kstatfs *buf)
+romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
buf->f_type = ROMFS_MAGIC;
buf->f_bsize = ROMBSIZE;
buf->f_bfree = buf->f_bavail = buf->f_ffree;
- buf->f_blocks = (romfs_maxsize(sb)+ROMBSIZE-1)>>ROMBSBITS;
+ buf->f_blocks = (romfs_maxsize(dentry->d_sb)+ROMBSIZE-1)>>ROMBSBITS;
buf->f_namelen = ROMFS_MAXFN;
return 0;
}
@@ -607,10 +607,11 @@ static struct super_operations romfs_ops = {
.remount_fs = romfs_remount,
};
-static struct super_block *romfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int romfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, romfs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, romfs_fill_super,
+ mnt);
}
static struct file_system_type romfs_fs_type = {
diff --git a/fs/select.c b/fs/select.c
index a8109baa5e4..9c4f0f2604f 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -546,37 +546,38 @@ struct poll_list {
#define POLLFD_PER_PAGE ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
-static void do_pollfd(unsigned int num, struct pollfd * fdpage,
- poll_table ** pwait, int *count)
+/*
+ * Fish for pollable events on the pollfd->fd file descriptor. We're only
+ * interested in events matching the pollfd->events mask, and the result
+ * matching that mask is both recorded in pollfd->revents and returned. The
+ * pwait poll_table will be used by the fd-provided poll handler for waiting,
+ * if non-NULL.
+ */
+static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
- int i;
-
- for (i = 0; i < num; i++) {
- int fd;
- unsigned int mask;
- struct pollfd *fdp;
-
- mask = 0;
- fdp = fdpage+i;
- fd = fdp->fd;
- if (fd >= 0) {
- int fput_needed;
- struct file * file = fget_light(fd, &fput_needed);
- mask = POLLNVAL;
- if (file != NULL) {
- mask = DEFAULT_POLLMASK;
- if (file->f_op && file->f_op->poll)
- mask = file->f_op->poll(file, *pwait);
- mask &= fdp->events | POLLERR | POLLHUP;
- fput_light(file, fput_needed);
- }
- if (mask) {
- *pwait = NULL;
- (*count)++;
- }
+ unsigned int mask;
+ int fd;
+
+ mask = 0;
+ fd = pollfd->fd;
+ if (fd >= 0) {
+ int fput_needed;
+ struct file * file;
+
+ file = fget_light(fd, &fput_needed);
+ mask = POLLNVAL;
+ if (file != NULL) {
+ mask = DEFAULT_POLLMASK;
+ if (file->f_op && file->f_op->poll)
+ mask = file->f_op->poll(file, pwait);
+ /* Mask out unneeded events. */
+ mask &= pollfd->events | POLLERR | POLLHUP;
+ fput_light(file, fput_needed);
}
- fdp->revents = mask;
}
+ pollfd->revents = mask;
+
+ return mask;
}
static int do_poll(unsigned int nfds, struct poll_list *list,
@@ -594,11 +595,29 @@ static int do_poll(unsigned int nfds, struct poll_list *list,
long __timeout;
set_current_state(TASK_INTERRUPTIBLE);
- walk = list;
- while(walk != NULL) {
- do_pollfd( walk->len, walk->entries, &pt, &count);
- walk = walk->next;
+ for (walk = list; walk != NULL; walk = walk->next) {
+ struct pollfd * pfd, * pfd_end;
+
+ pfd = walk->entries;
+ pfd_end = pfd + walk->len;
+ for (; pfd != pfd_end; pfd++) {
+ /*
+ * Fish for events. If we found one, record it
+ * and kill the poll_table, so we don't
+ * needlessly register any other waiters after
+ * this. They'll get immediately deregistered
+ * when we break out and return.
+ */
+ if (do_pollfd(pfd, pt)) {
+ count++;
+ pt = NULL;
+ }
+ }
}
+ /*
+ * All waiters have already been registered, so don't provide
+ * a poll_table to them on the next loop iteration.
+ */
pt = NULL;
if (count || !*timeout || signal_pending(current))
break;
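The rewritten do_pollfd() reports, per descriptor, only the events the caller asked for plus POLLERR and POLLHUP, which is the contract poll(2) users already rely on. For reference, the userspace side of that contract (hypothetical helper, not from the patch):

#include <poll.h>
#include <stdio.h>

/* wait for fd to become readable; returns poll()'s result */
int wait_readable(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        int n = poll(&pfd, 1, timeout_ms);

        if (n > 0 && (pfd.revents & (POLLERR | POLLHUP)))
                fprintf(stderr, "fd %d: error or hangup\n", fd);
        return n;       /* >0 ready, 0 timeout, <0 error */
}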
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index fdeabc0a34f..506ff87c1d4 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -48,7 +48,7 @@
static void smb_delete_inode(struct inode *);
static void smb_put_super(struct super_block *);
-static int smb_statfs(struct super_block *, struct kstatfs *);
+static int smb_statfs(struct dentry *, struct kstatfs *);
static int smb_show_options(struct seq_file *, struct vfsmount *);
static kmem_cache_t *smb_inode_cachep;
@@ -641,13 +641,13 @@ out_no_server:
}
static int
-smb_statfs(struct super_block *sb, struct kstatfs *buf)
+smb_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int result;
lock_kernel();
- result = smb_proc_dskattr(sb, buf);
+ result = smb_proc_dskattr(dentry, buf);
unlock_kernel();
@@ -782,10 +782,10 @@ out:
return error;
}
-static struct super_block *smb_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int smb_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, smb_fill_super);
+ return get_sb_nodev(fs_type, flags, data, smb_fill_super, mnt);
}
static struct file_system_type smb_fs_type = {
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index b1b878b8173..c3495059889 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -3226,9 +3226,9 @@ smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr)
}
int
-smb_proc_dskattr(struct super_block *sb, struct kstatfs *attr)
+smb_proc_dskattr(struct dentry *dentry, struct kstatfs *attr)
{
- struct smb_sb_info *server = SMB_SB(sb);
+ struct smb_sb_info *server = SMB_SB(dentry->d_sb);
int result;
char *p;
long unit;
diff --git a/fs/smbfs/proto.h b/fs/smbfs/proto.h
index 47664597e6b..972ed7dad38 100644
--- a/fs/smbfs/proto.h
+++ b/fs/smbfs/proto.h
@@ -29,7 +29,7 @@ extern int smb_proc_getattr(struct dentry *dir, struct smb_fattr *fattr);
extern int smb_proc_setattr(struct dentry *dir, struct smb_fattr *fattr);
extern int smb_proc_setattr_unix(struct dentry *d, struct iattr *attr, unsigned int major, unsigned int minor);
extern int smb_proc_settime(struct dentry *dentry, struct smb_fattr *fattr);
-extern int smb_proc_dskattr(struct super_block *sb, struct kstatfs *attr);
+extern int smb_proc_dskattr(struct dentry *dentry, struct kstatfs *attr);
extern int smb_proc_read_link(struct smb_sb_info *server, struct dentry *d, char *buffer, int len);
extern int smb_proc_symlink(struct smb_sb_info *server, struct dentry *d, const char *oldpath);
extern int smb_proc_link(struct smb_sb_info *server, struct dentry *dentry, struct dentry *new_dentry);
diff --git a/fs/super.c b/fs/super.c
index 9d5c2add722..057b5325b7e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -231,7 +231,7 @@ void generic_shutdown_super(struct super_block *sb)
if (root) {
sb->s_root = NULL;
shrink_dcache_parent(root);
- shrink_dcache_anon(sb);
+ shrink_dcache_sb(sb);
dput(root);
fsync_super(sb);
lock_super(sb);
@@ -486,7 +486,7 @@ asmlinkage long sys_ustat(unsigned dev, struct ustat __user * ubuf)
s = user_get_super(new_decode_dev(dev));
if (s == NULL)
goto out;
- err = vfs_statfs(s, &sbuf);
+ err = vfs_statfs(s->s_root, &sbuf);
drop_super(s);
if (err)
goto out;
@@ -676,9 +676,10 @@ static void bdev_uevent(struct block_device *bdev, enum kobject_action action)
}
}
-struct super_block *get_sb_bdev(struct file_system_type *fs_type,
+int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int))
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
{
struct block_device *bdev;
struct super_block *s;
@@ -686,7 +687,7 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
bdev = open_bdev_excl(dev_name, flags, fs_type);
if (IS_ERR(bdev))
- return (struct super_block *)bdev;
+ return PTR_ERR(bdev);
/*
* once the super is inserted into the list by sget, s_umount
@@ -697,15 +698,17 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
mutex_unlock(&bdev->bd_mount_mutex);
if (IS_ERR(s))
- goto out;
+ goto error_s;
if (s->s_root) {
if ((flags ^ s->s_flags) & MS_RDONLY) {
up_write(&s->s_umount);
deactivate_super(s);
- s = ERR_PTR(-EBUSY);
+ error = -EBUSY;
+ goto error_bdev;
}
- goto out;
+
+ close_bdev_excl(bdev);
} else {
char b[BDEVNAME_SIZE];
@@ -716,18 +719,21 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- s = ERR_PTR(error);
- } else {
- s->s_flags |= MS_ACTIVE;
- bdev_uevent(bdev, KOBJ_MOUNT);
+ goto error;
}
+
+ s->s_flags |= MS_ACTIVE;
+ bdev_uevent(bdev, KOBJ_MOUNT);
}
- return s;
+ return simple_set_mnt(mnt, s);
-out:
+error_s:
+ error = PTR_ERR(s);
+error_bdev:
close_bdev_excl(bdev);
- return s;
+error:
+ return error;
}
EXPORT_SYMBOL(get_sb_bdev);
@@ -744,15 +750,16 @@ void kill_block_super(struct super_block *sb)
EXPORT_SYMBOL(kill_block_super);
-struct super_block *get_sb_nodev(struct file_system_type *fs_type,
+int get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int))
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
{
int error;
struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
if (IS_ERR(s))
- return s;
+ return PTR_ERR(s);
s->s_flags = flags;
@@ -760,10 +767,10 @@ struct super_block *get_sb_nodev(struct file_system_type *fs_type,
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
- return s;
+ return simple_set_mnt(mnt, s);
}
EXPORT_SYMBOL(get_sb_nodev);
@@ -773,94 +780,102 @@ static int compare_single(struct super_block *s, void *p)
return 1;
}
-struct super_block *get_sb_single(struct file_system_type *fs_type,
+int get_sb_single(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int))
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt)
{
struct super_block *s;
int error;
s = sget(fs_type, compare_single, set_anon_super, NULL);
if (IS_ERR(s))
- return s;
+ return PTR_ERR(s);
if (!s->s_root) {
s->s_flags = flags;
error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (error) {
up_write(&s->s_umount);
deactivate_super(s);
- return ERR_PTR(error);
+ return error;
}
s->s_flags |= MS_ACTIVE;
}
do_remount_sb(s, flags, data, 0);
- return s;
+ return simple_set_mnt(mnt, s);
}
EXPORT_SYMBOL(get_sb_single);
struct vfsmount *
-do_kern_mount(const char *fstype, int flags, const char *name, void *data)
+vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
- struct file_system_type *type = get_fs_type(fstype);
- struct super_block *sb = ERR_PTR(-ENOMEM);
struct vfsmount *mnt;
- int error;
char *secdata = NULL;
+ int error;
if (!type)
return ERR_PTR(-ENODEV);
+ error = -ENOMEM;
mnt = alloc_vfsmnt(name);
if (!mnt)
goto out;
if (data) {
secdata = alloc_secdata();
- if (!secdata) {
- sb = ERR_PTR(-ENOMEM);
+ if (!secdata)
goto out_mnt;
- }
error = security_sb_copy_data(type, data, secdata);
- if (error) {
- sb = ERR_PTR(error);
+ if (error)
goto out_free_secdata;
- }
}
- sb = type->get_sb(type, flags, name, data);
- if (IS_ERR(sb))
+ error = type->get_sb(type, flags, name, data, mnt);
+ if (error < 0)
goto out_free_secdata;
- error = security_sb_kern_mount(sb, secdata);
+
+ error = security_sb_kern_mount(mnt->mnt_sb, secdata);
if (error)
goto out_sb;
- mnt->mnt_sb = sb;
- mnt->mnt_root = dget(sb->s_root);
- mnt->mnt_mountpoint = sb->s_root;
+
+ mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- up_write(&sb->s_umount);
+ up_write(&mnt->mnt_sb->s_umount);
free_secdata(secdata);
- put_filesystem(type);
return mnt;
out_sb:
- up_write(&sb->s_umount);
- deactivate_super(sb);
- sb = ERR_PTR(error);
+ dput(mnt->mnt_root);
+ up_write(&mnt->mnt_sb->s_umount);
+ deactivate_super(mnt->mnt_sb);
out_free_secdata:
free_secdata(secdata);
out_mnt:
free_vfsmnt(mnt);
out:
+ return ERR_PTR(error);
+}
+
+EXPORT_SYMBOL_GPL(vfs_kern_mount);
+
+struct vfsmount *
+do_kern_mount(const char *fstype, int flags, const char *name, void *data)
+{
+ struct file_system_type *type = get_fs_type(fstype);
+ struct vfsmount *mnt;
+ if (!type)
+ return ERR_PTR(-ENODEV);
+ mnt = vfs_kern_mount(type, flags, name, data);
put_filesystem(type);
- return (struct vfsmount *)sb;
+ return mnt;
}
EXPORT_SYMBOL_GPL(do_kern_mount);
struct vfsmount *kern_mount(struct file_system_type *type)
{
- return do_kern_mount(type->name, 0, type->name, NULL);
+ return vfs_kern_mount(type, 0, type->name, NULL);
}
EXPORT_SYMBOL(kern_mount);
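do_kern_mount() and kern_mount() are now thin wrappers; code that already holds a struct file_system_type can call vfs_kern_mount() directly and skip the get_fs_type() lookup. A sketch of such a caller (names hypothetical):

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/err.h>

/* sketch: internal mount when the file_system_type is already known */
static struct vfsmount *examplefs_internal_mount(struct file_system_type *type)
{
        struct vfsmount *mnt = vfs_kern_mount(type, 0, type->name, NULL);

        if (IS_ERR(mnt))
                return mnt;     /* -ENODEV, -ENOMEM, or the fs's own error */
        /* caller eventually drops the mount with mntput(mnt) */
        return mnt;
}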
diff --git a/fs/sync.c b/fs/sync.c
index aab5ffe77e9..955aef04da2 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -100,7 +100,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
}
if (nbytes == 0)
- endbyte = -1;
+ endbyte = LLONG_MAX;
else
endbyte--; /* inclusive */
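With nbytes == 0 now meaning "up to LLONG_MAX", sync_file_range(fd, 0, 0, flags) covers the whole file even if it grows after the call is issued. The userspace view, assuming the glibc wrapper of that name is available (older libcs need syscall(2)):

#define _GNU_SOURCE
#include <fcntl.h>

/* start writeback of every dirty page in the file, without waiting */
int start_whole_file_writeback(int fd)
{
        return sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE);
}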
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index f1117e885bd..40190c48927 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -66,10 +66,10 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *sysfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sysfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, sysfs_fill_super);
+ return get_sb_single(fs_type, flags, data, sysfs_fill_super, mnt);
}
static struct file_system_type sysfs_fs_type = {
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index d7074341ee8..f2bef962d30 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -53,8 +53,7 @@ static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page)) {
wait_on_page_locked(page);
kmap(page);
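Note: read_mapping_page() used here is a small helper that reads a page through the
mapping's own ->readpage, replacing the open-coded read_cache_page() call with the
cast filler. A hedged sketch of roughly what the helper amounts to; the exact
definition lives in include/linux/pagemap.h, not in this patch.

    /* Hedged sketch -- approximate shape of the helper this hunk switches to. */
    static inline struct page *read_mapping_page(struct address_space *mapping,
                                                 unsigned long index, void *data)
    {
            filler_t *filler = (filler_t *)mapping->a_ops->readpage;
            return read_cache_page(mapping, index, filler, data);
    }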
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3ff89cc5833..58b2d22142b 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -85,8 +85,9 @@ static void sysv_put_super(struct super_block *sb)
kfree(sbi);
}
-static int sysv_statfs(struct super_block *sb, struct kstatfs *buf)
+static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct sysv_sb_info *sbi = SYSV_SB(sb);
buf->f_type = sb->s_magic;
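Note: ->statfs() now takes the dentry being queried instead of a bare super_block,
and implementations recover the superblock via dentry->d_sb, as in the sysv, udf, ufs
and xfs hunks in this patch. A minimal sketch of the conversion pattern; "examplefs"
is hypothetical.

    /* Hedged sketch of the new ->statfs() calling convention. */
    static int examplefs_statfs(struct dentry *dentry, struct kstatfs *buf)
    {
            struct super_block *sb = dentry->d_sb;  /* recover the superblock */

            buf->f_type  = sb->s_magic;
            buf->f_bsize = sb->s_blocksize;
            /* ... fill the remaining fields from per-sb information ... */
            return 0;
    }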
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index e92b991e6dd..876639b9332 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -506,16 +506,17 @@ failed:
/* Every kernel module contains stuff like this. */
-static struct super_block *sysv_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sysv_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, sysv_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, sysv_fill_super,
+ mnt);
}
-static struct super_block *v7_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int v7_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, v7_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, v7_fill_super, mnt);
}
static struct file_system_type sysv_fs_type = {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e45789fe38e..44fe2cb0bbb 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -91,13 +91,13 @@ static void udf_load_partdesc(struct super_block *, struct buffer_head *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
-static int udf_statfs(struct super_block *, struct kstatfs *);
+static int udf_statfs(struct dentry *, struct kstatfs *);
/* UDF filesystem type */
-static struct super_block *udf_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int udf_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super, mnt);
}
static struct file_system_type udf_fstype = {
@@ -1779,8 +1779,10 @@ udf_put_super(struct super_block *sb)
* Written, tested, and released.
*/
static int
-udf_statfs(struct super_block *sb, struct kstatfs *buf)
+udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
+
buf->f_type = UDF_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = UDF_SB_PARTLEN(sb, UDF_SB_PARTITION(sb));
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index db98a4c71e6..fe5ab2aa289 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1113,8 +1113,9 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
return 0;
}
-static int ufs_statfs (struct super_block *sb, struct kstatfs *buf)
+static int ufs_statfs (struct dentry *dentry, struct kstatfs *buf)
{
+ struct super_block *sb = dentry->d_sb;
struct ufs_sb_private_info * uspi;
struct ufs_super_block_first * usb1;
struct ufs_super_block * usb;
@@ -1311,10 +1312,10 @@ out:
#endif
-static struct super_block *ufs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ufs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, ufs_fill_super, mnt);
}
static struct file_system_type ufs_fs_type = {
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index a56cec3be5f..9a8f48bae95 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -1023,11 +1023,12 @@ static int vfat_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *vfat_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int vfat_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, vfat_fill_super,
+ mnt);
}
static struct file_system_type vfat_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 1f0589a05ec..e480b610205 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -62,7 +62,7 @@ xfs_read_xfsstats(
while (j < xstats[i].endpoint) {
val = 0;
/* sum over all cpus */
- for_each_cpu(c)
+ for_each_possible_cpu(c)
val += *(((__u32*)&per_cpu(xfsstats, c) + j));
len += sprintf(buffer + len, " %u", val);
j++;
@@ -70,7 +70,7 @@ xfs_read_xfsstats(
buffer[len++] = '\n';
}
/* extra precision counters */
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
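Note: the for_each_cpu() iterator over all possible CPUs was renamed, so summing
per-CPU statistics now spells out for_each_possible_cpu(), as in the xfs hunks above.
A hedged sketch of the summation idiom; the counter name is hypothetical.

    /* Hedged sketch -- per-CPU counter summation, not from this patch. */
    DEFINE_PER_CPU(__u32, example_counter);

    static __u64 example_sum(void)
    {
            __u64 total = 0;
            int c;

            /* walk every CPU that could ever come online, not just the
             * currently online ones, so no counts are missed */
            for_each_possible_cpu(c)
                    total += per_cpu(example_counter, c);
            return total;
    }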
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f2a0778536f..9bdef9d5190 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -684,10 +684,11 @@ xfs_fs_sync_super(
STATIC int
xfs_fs_statfs(
- struct super_block *sb,
+ struct dentry *dentry,
struct kstatfs *statp)
{
- return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
+ return -bhv_vfs_statvfs(vfs_from_sb(dentry->d_sb), statp,
+ vn_from_inode(dentry->d_inode));
}
STATIC int
@@ -853,14 +854,16 @@ fail_vfsop:
return -error;
}
-STATIC struct super_block *
+STATIC int
xfs_fs_get_sb(
struct file_system_type *fs_type,
int flags,
const char *dev_name,
- void *data)
+ void *data,
+ struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
+ return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
+ mnt);
}
STATIC struct super_operations xfs_super_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
index 4af97682bec..af246532fbf 100644
--- a/fs/xfs/linux-2.6/xfs_sysctl.c
+++ b/fs/xfs/linux-2.6/xfs_sysctl.c
@@ -38,7 +38,7 @@ xfs_stats_clear_proc_handler(
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
- for_each_cpu(c) {
+ for_each_possible_cpu(c) {
preempt_disable();
/* save vn_active, it's a universal truth! */
vn_active = per_cpu(xfsstats, c).vn_active;
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index e27dc8f2997..b9beceb3314 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -63,7 +63,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20060127
+#define ACPI_CA_VERSION 0x20060608
/*
* OS name, used for the _OS object. The _OS object is essentially obsolete,
@@ -81,6 +81,7 @@
#define ACPI_MAX_PARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */
+#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */
/*
* Should the subsystem abort the loading of an ACPI table if the
@@ -102,9 +103,9 @@
#define ACPI_MAX_SEMAPHORE_COUNT 256
-/* Max reference count (for debug only) */
+/* Maximum object reference count (detects object deletion issues) */
-#define ACPI_MAX_REFERENCE_COUNT 0x400
+#define ACPI_MAX_REFERENCE_COUNT 0x800
/* Size of cached memory mapping for system memory operation region */
@@ -171,12 +172,7 @@
/* Array sizes. Used for range checking also */
-#define ACPI_NUM_ACCESS_TYPES 6
-#define ACPI_NUM_UPDATE_RULES 3
-#define ACPI_NUM_LOCK_RULES 2
-#define ACPI_NUM_MATCH_OPS 6
-#define ACPI_NUM_OPCODES 256
-#define ACPI_NUM_FIELD_NAMES 2
+#define ACPI_MAX_MATCH_OPCODE 5
/* RSDP checksums */
@@ -187,10 +183,6 @@
#define ACPI_SMBUS_BUFFER_SIZE 34
-/* Number of strings associated with the _OSI reserved method */
-
-#define ACPI_NUM_OSI_STRINGS 10
-
/******************************************************************************
*
* ACPI AML Debugger
diff --git a/include/acpi/acdisasm.h b/include/acpi/acdisasm.h
index 11a8fe39cb0..9a7d6921f53 100644
--- a/include/acpi/acdisasm.h
+++ b/include/acpi/acdisasm.h
@@ -50,26 +50,72 @@
#define BLOCK_PAREN 1
#define BLOCK_BRACE 2
#define BLOCK_COMMA_LIST 4
+#define ACPI_DEFAULT_RESNAME *(u32 *) "__RD"
struct acpi_external_list {
char *path;
+ char *internal_path;
struct acpi_external_list *next;
+ u32 value;
+ u16 length;
+ u8 type;
};
extern struct acpi_external_list *acpi_gbl_external_list;
-/* Strings used for decoding flags to ASL keywords */
+typedef const struct acpi_dmtable_info {
+ u8 opcode;
+ u8 offset;
+ char *name;
+
+} acpi_dmtable_info;
+
+/*
+ * Values for Opcode above.
+ * Note: 0-7 must not change, used as a flag shift value
+ */
+#define ACPI_DMT_FLAG0 0
+#define ACPI_DMT_FLAG1 1
+#define ACPI_DMT_FLAG2 2
+#define ACPI_DMT_FLAG3 3
+#define ACPI_DMT_FLAG4 4
+#define ACPI_DMT_FLAG5 5
+#define ACPI_DMT_FLAG6 6
+#define ACPI_DMT_FLAG7 7
+#define ACPI_DMT_FLAGS0 8
+#define ACPI_DMT_FLAGS2 9
+#define ACPI_DMT_UINT8 10
+#define ACPI_DMT_UINT16 11
+#define ACPI_DMT_UINT24 12
+#define ACPI_DMT_UINT32 13
+#define ACPI_DMT_UINT56 14
+#define ACPI_DMT_UINT64 15
+#define ACPI_DMT_STRING 16
+#define ACPI_DMT_NAME4 17
+#define ACPI_DMT_NAME6 18
+#define ACPI_DMT_NAME8 19
+#define ACPI_DMT_CHKSUM 20
+#define ACPI_DMT_SPACEID 21
+#define ACPI_DMT_GAS 22
+#define ACPI_DMT_MADT 23
+#define ACPI_DMT_SRAT 24
+#define ACPI_DMT_EXIT 25
-extern const char *acpi_gbl_word_decode[4];
-extern const char *acpi_gbl_irq_decode[2];
-extern const char *acpi_gbl_lock_rule[ACPI_NUM_LOCK_RULES];
-extern const char *acpi_gbl_access_types[ACPI_NUM_ACCESS_TYPES];
-extern const char *acpi_gbl_update_rules[ACPI_NUM_UPDATE_RULES];
-extern const char *acpi_gbl_match_ops[ACPI_NUM_MATCH_OPS];
+typedef
+void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table);
+
+struct acpi_dmtable_data {
+ char *signature;
+ struct acpi_dmtable_info *table_info;
+ ACPI_TABLE_HANDLER table_handler;
+};
struct acpi_op_walk_info {
u32 level;
+ u32 last_level;
+ u32 count;
u32 bit_offset;
+ u32 flags;
struct acpi_walk_state *walk_state;
};
@@ -77,6 +123,100 @@ typedef
acpi_status(*asl_walk_callback) (union acpi_parse_object * op,
u32 level, void *context);
+struct acpi_resource_tag {
+ u32 bit_index;
+ char *tag;
+};
+
+/* Strings used for decoding flags to ASL keywords */
+
+extern const char *acpi_gbl_word_decode[];
+extern const char *acpi_gbl_irq_decode[];
+extern const char *acpi_gbl_lock_rule[];
+extern const char *acpi_gbl_access_types[];
+extern const char *acpi_gbl_update_rules[];
+extern const char *acpi_gbl_match_ops[];
+
+extern struct acpi_dmtable_info acpi_dm_table_info_asf0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
+extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
+extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
+extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
+extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
+extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_gas[];
+extern struct acpi_dmtable_info acpi_dm_table_info_header[];
+extern struct acpi_dmtable_info acpi_dm_table_info_hpet[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt3[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt4[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt5[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt6[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt7[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt8[];
+extern struct acpi_dmtable_info acpi_dm_table_info_madt_hdr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_mcfg[];
+extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[];
+extern struct acpi_dmtable_info acpi_dm_table_info_sbst[];
+extern struct acpi_dmtable_info acpi_dm_table_info_slit[];
+extern struct acpi_dmtable_info acpi_dm_table_info_spcr[];
+extern struct acpi_dmtable_info acpi_dm_table_info_spmi[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_srat1[];
+extern struct acpi_dmtable_info acpi_dm_table_info_tcpa[];
+extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[];
+
+/*
+ * dmtable
+ */
+void acpi_dm_dump_data_table(struct acpi_table_header *table);
+
+void
+acpi_dm_dump_table(u32 table_length,
+ u32 table_offset,
+ void *table,
+ u32 sub_table_length, struct acpi_dmtable_info *info);
+
+void acpi_dm_line_header(u32 offset, u32 byte_length, char *name);
+
+void acpi_dm_line_header2(u32 offset, u32 byte_length, char *name, u32 value);
+
+/*
+ * dmtbdump
+ */
+void acpi_dm_dump_asf(struct acpi_table_header *table);
+
+void acpi_dm_dump_cpep(struct acpi_table_header *table);
+
+void acpi_dm_dump_fadt(struct acpi_table_header *table);
+
+void acpi_dm_dump_srat(struct acpi_table_header *table);
+
+void acpi_dm_dump_mcfg(struct acpi_table_header *table);
+
+void acpi_dm_dump_madt(struct acpi_table_header *table);
+
+u32 acpi_dm_dump_rsdp(struct acpi_table_header *table);
+
+void acpi_dm_dump_rsdt(struct acpi_table_header *table);
+
+void acpi_dm_dump_slit(struct acpi_table_header *table);
+
+void acpi_dm_dump_xsdt(struct acpi_table_header *table);
+
/*
* dmwalk
*/
@@ -84,6 +224,11 @@ void
acpi_dm_disassemble(struct acpi_walk_state *walk_state,
union acpi_parse_object *origin, u32 num_opcodes);
+void
+acpi_dm_walk_parse_tree(union acpi_parse_object *op,
+ asl_walk_callback descending_callback,
+ asl_walk_callback ascending_callback, void *context);
+
/*
* dmopcode
*/
@@ -166,6 +311,7 @@ void acpi_dm_dump_integer64(u64 value, char *name);
void
acpi_dm_resource_template(struct acpi_op_walk_info *info,
+ union acpi_parse_object *op,
u8 * byte_data, u32 byte_count);
u8 acpi_dm_is_resource_template(union acpi_parse_object *op);
@@ -176,6 +322,8 @@ void acpi_dm_bit_list(u16 mask);
void acpi_dm_decode_attribute(u8 attribute);
+void acpi_dm_descriptor_name(void);
+
/*
* dmresrcl
*/
@@ -248,6 +396,15 @@ acpi_dm_vendor_small_descriptor(union aml_resource *resource,
/*
* dmutils
*/
-void acpi_dm_add_to_external_list(char *path);
+void acpi_dm_add_to_external_list(char *path, u8 type, u32 value);
+
+/*
+ * dmrestag
+ */
+void acpi_dm_find_resources(union acpi_parse_object *root);
+
+void
+acpi_dm_check_resource_reference(union acpi_parse_object *op,
+ struct acpi_walk_state *walk_state);
#endif /* __ACDISASM_H__ */
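Note: the ACPI_DMT_* values and acpi_dmtable_info introduced above describe an ACPI
data table declaratively (field type, offset, display name) so that
acpi_dm_dump_table() can walk any table from an acpi_dm_table_info_* array. A hedged
sketch of what such a description could look like; the "EXMP" table, its fields and
offsets are entirely hypothetical.

    /* Hedged sketch -- data-driven table description, not from this patch. */
    struct acpi_table_exmp {
            struct acpi_table_header header;
            u32 window_base;
            u8  flags;
    };

    static acpi_dmtable_info acpi_dm_table_info_exmp[] = {
            {ACPI_DMT_UINT32, (u8) ACPI_OFFSET(struct acpi_table_exmp, window_base), "Window Base"},
            {ACPI_DMT_UINT8,  (u8) ACPI_OFFSET(struct acpi_table_exmp, flags),       "Flags"},
            {ACPI_DMT_EXIT,   0, NULL}      /* terminator */
    };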
diff --git a/include/acpi/acdispat.h b/include/acpi/acdispat.h
index c41a926ff31..288f84903af 100644
--- a/include/acpi/acdispat.h
+++ b/include/acpi/acdispat.h
@@ -194,7 +194,9 @@ acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc);
-void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state);
+void
+acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
+ struct acpi_walk_state *walk_state);
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
@@ -302,7 +304,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
struct acpi_namespace_node *method_node,
u8 * aml_start,
u32 aml_length,
- struct acpi_parameter_info *info, u8 pass_number);
+ struct acpi_evaluate_info *info, u8 pass_number);
acpi_status
acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
diff --git a/include/acpi/acevents.h b/include/acpi/acevents.h
index f2717be4fe0..234142828e1 100644
--- a/include/acpi/acevents.h
+++ b/include/acpi/acevents.h
@@ -93,7 +93,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
*/
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-acpi_status acpi_ev_walk_gpe_list(ACPI_GPE_CALLBACK gpe_walk_callback);
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback);
acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
@@ -138,7 +138,7 @@ acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 function,
acpi_physical_address address,
- u32 bit_width, void *value);
+ u32 bit_width, acpi_integer * value);
acpi_status
acpi_ev_attach_region(union acpi_operand_object *handler_obj,
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index dc768aa580e..797ca1ea521 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -160,8 +160,9 @@
#define AE_AML_BAD_RESOURCE_VALUE (acpi_status) (0x001F | AE_CODE_AML)
#define AE_AML_CIRCULAR_REFERENCE (acpi_status) (0x0020 | AE_CODE_AML)
#define AE_AML_BAD_RESOURCE_LENGTH (acpi_status) (0x0021 | AE_CODE_AML)
+#define AE_AML_ILLEGAL_ADDRESS (acpi_status) (0x0022 | AE_CODE_AML)
-#define AE_CODE_AML_MAX 0x0021
+#define AE_CODE_AML_MAX 0x0022
/*
* Internal exceptions used for control
@@ -275,7 +276,8 @@ char const *acpi_gbl_exception_names_aml[] = {
"AE_AML_NO_RESOURCE_END_TAG",
"AE_AML_BAD_RESOURCE_VALUE",
"AE_AML_CIRCULAR_REFERENCE",
- "AE_AML_BAD_RESOURCE_LENGTH"
+ "AE_AML_BAD_RESOURCE_LENGTH",
+ "AE_AML_ILLEGAL_ADDRESS"
};
char const *acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h
index 734cc77bf2c..14531d48f6b 100644
--- a/include/acpi/acglobal.h
+++ b/include/acpi/acglobal.h
@@ -107,6 +107,7 @@ ACPI_EXTERN u32 acpi_gbl_trace_flags;
* 3) Allow access to uninitialized locals/args (auto-init to integer 0)
* 4) Allow ANY object type to be a source operand for the Store() operator
* 5) Allow unresolved references (invalid target name) in package objects
+ * 6) Enable warning messages for behavior that is not ACPI spec compliant
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
@@ -114,7 +115,7 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
* Automatically serialize ALL control methods? Default is FALSE, meaning
* to use the Serialized/not_serialized method flags on a per method basis.
* Only change this if the ASL code is poorly written and cannot handle
- * reentrancy even though methods are marked "not_serialized".
+ * reentrancy even though methods are marked "NotSerialized".
*/
ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
@@ -149,10 +150,10 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
ACPI_EXTERN u32 acpi_gbl_table_flags;
ACPI_EXTERN u32 acpi_gbl_rsdt_table_count;
ACPI_EXTERN struct rsdp_descriptor *acpi_gbl_RSDP;
-ACPI_EXTERN XSDT_DESCRIPTOR *acpi_gbl_XSDT;
-ACPI_EXTERN FADT_DESCRIPTOR *acpi_gbl_FADT;
+ACPI_EXTERN struct xsdt_descriptor *acpi_gbl_XSDT;
+ACPI_EXTERN struct fadt_descriptor *acpi_gbl_FADT;
ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
-ACPI_EXTERN FACS_DESCRIPTOR *acpi_gbl_FACS;
+ACPI_EXTERN struct facs_descriptor *acpi_gbl_FACS;
ACPI_EXTERN struct acpi_common_facs acpi_gbl_common_fACS;
/*
* Since there may be multiple SSDTs and PSDTs, a single pointer is not
@@ -177,15 +178,15 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
/*
* ACPI Table info arrays
*/
-extern struct acpi_table_list acpi_gbl_table_lists[NUM_ACPI_TABLE_TYPES];
-extern struct acpi_table_support acpi_gbl_table_data[NUM_ACPI_TABLE_TYPES];
+extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
+extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
/*
* Predefined mutex objects. This array contains the
* actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
* (The table maps local handles to the real OS handles)
*/
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX];
+ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/*****************************************************************************
*
@@ -203,6 +204,7 @@ ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
/* Object caches */
+ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
@@ -244,7 +246,6 @@ extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
extern const char *acpi_gbl_highest_dstate_names[4];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-extern const char *acpi_gbl_valid_osi_strings[ACPI_NUM_OSI_STRINGS];
/*****************************************************************************
*
@@ -291,14 +292,6 @@ ACPI_EXTERN u8 acpi_gbl_cm_single_step;
/*****************************************************************************
*
- * Parser globals
- *
- ****************************************************************************/
-
-ACPI_EXTERN union acpi_parse_object *acpi_gbl_parsed_namespace_root;
-
-/*****************************************************************************
- *
* Hardware globals
*
****************************************************************************/
@@ -321,7 +314,11 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
+
+/* Spinlocks */
+
ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock;
+ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock;
/*****************************************************************************
*
diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h
index 8361820d297..1eeca7adca9 100644
--- a/include/acpi/aclocal.h
+++ b/include/acpi/aclocal.h
@@ -44,7 +44,10 @@
#ifndef __ACLOCAL_H__
#define __ACLOCAL_H__
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
+#define ACPI_INFINITE_CONCURRENCY 0xFF
typedef void *acpi_mutex;
typedef u32 acpi_mutex_handle;
@@ -69,52 +72,55 @@ union acpi_parse_object;
* Predefined handles for the mutex objects used within the subsystem
* All mutex objects are automatically created by acpi_ut_mutex_initialize.
*
- * The acquire/release ordering protocol is implied via this list. Mutexes
+ * The acquire/release ordering protocol is implied via this list. Mutexes
* with a lower value must be acquired before mutexes with a higher value.
*
- * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names table also!
+ * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
+ * table below also!
*/
-#define ACPI_MTX_EXECUTE 0
-#define ACPI_MTX_INTERPRETER 1
-#define ACPI_MTX_PARSER 2
-#define ACPI_MTX_DISPATCHER 3
-#define ACPI_MTX_TABLES 4
-#define ACPI_MTX_OP_REGIONS 5
-#define ACPI_MTX_NAMESPACE 6
-#define ACPI_MTX_EVENTS 7
-#define ACPI_MTX_HARDWARE 8
-#define ACPI_MTX_CACHES 9
-#define ACPI_MTX_MEMORY 10
-#define ACPI_MTX_DEBUG_CMD_COMPLETE 11
-#define ACPI_MTX_DEBUG_CMD_READY 12
-
-#define MAX_MUTEX 12
-#define NUM_MUTEX MAX_MUTEX+1
+#define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
+#define ACPI_MTX_CONTROL_METHOD 1 /* Control method termination [TBD: may no longer be necessary] */
+#define ACPI_MTX_TABLES 2 /* Data for ACPI tables */
+#define ACPI_MTX_NAMESPACE 3 /* ACPI Namespace */
+#define ACPI_MTX_EVENTS 4 /* Data for ACPI events */
+#define ACPI_MTX_CACHES 5 /* Internal caches, general purposes */
+#define ACPI_MTX_MEMORY 6 /* Debug memory tracking lists */
+#define ACPI_MTX_DEBUG_CMD_COMPLETE 7 /* AML debugger */
+#define ACPI_MTX_DEBUG_CMD_READY 8 /* AML debugger */
+
+#define ACPI_MAX_MUTEX 8
+#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
#ifdef DEFINE_ACPI_GLOBALS
-/* Names for the mutexes used in the subsystem */
+/* Debug names for the mutexes above */
-static char *acpi_gbl_mutex_names[] = {
- "ACPI_MTX_Execute",
+static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
"ACPI_MTX_Interpreter",
- "ACPI_MTX_Parser",
- "ACPI_MTX_Dispatcher",
+ "ACPI_MTX_Method",
"ACPI_MTX_Tables",
- "ACPI_MTX_op_regions",
"ACPI_MTX_Namespace",
"ACPI_MTX_Events",
- "ACPI_MTX_Hardware",
"ACPI_MTX_Caches",
"ACPI_MTX_Memory",
- "ACPI_MTX_debug_cmd_complete",
- "ACPI_MTX_debug_cmd_ready",
+ "ACPI_MTX_DebugCmdComplete",
+ "ACPI_MTX_DebugCmdReady"
};
#endif
#endif
+/*
+ * Predefined handles for spinlocks used within the subsystem.
+ * These spinlocks are created by acpi_ut_mutex_initialize
+ */
+#define ACPI_LOCK_GPES 0
+#define ACPI_LOCK_HARDWARE 1
+
+#define ACPI_MAX_LOCK 1
+#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
+
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;
@@ -129,7 +135,7 @@ typedef u8 acpi_owner_id;
struct acpi_mutex_info {
acpi_mutex mutex;
u32 use_count;
- u32 thread_id;
+ acpi_thread_id thread_id;
};
/* Lock flag parameter for various interfaces */
@@ -144,6 +150,8 @@ struct acpi_mutex_info {
#define ACPI_FIELD_DWORD_GRANULARITY 4
#define ACPI_FIELD_QWORD_GRANULARITY 8
+#define ACPI_ENTRY_NOT_FOUND NULL
+
/*****************************************************************************
*
* Namespace typedefs and structs
@@ -158,49 +166,55 @@ typedef enum {
ACPI_IMODE_EXECUTE = 0x0E
} acpi_interpreter_mode;
-/*
- * The Node describes a named object that appears in the AML
- * An acpi_node is used to store Nodes.
- *
- * data_type is used to differentiate between internal descriptors, and MUST
- * be the first byte in this structure.
- */
union acpi_name_union {
u32 integer;
char ascii[4];
};
+/*
+ * The Namespace Node describes a named object that appears in the AML.
+ * descriptor_type is used to differentiate between internal descriptors.
+ *
+ * The node is optimized for both 32-bit and 64-bit platforms:
+ * 20 bytes for the 32-bit case, 32 bytes for the 64-bit case.
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
+ */
struct acpi_namespace_node {
- u8 descriptor; /* Used to differentiate object descriptor types */
- u8 type; /* Type associated with this name */
- u16 reference_count; /* Current count of references and children */
+ union acpi_operand_object *object; /* Interpreter object */
+ u8 descriptor_type; /* Differentiate object descriptor types */
+ u8 type; /* ACPI Type associated with this name */
+ u8 flags; /* Miscellaneous flags */
+ acpi_owner_id owner_id; /* Node creator */
union acpi_name_union name; /* ACPI Name, always 4 chars per ACPI spec */
- union acpi_operand_object *object; /* Pointer to attached ACPI object (optional) */
struct acpi_namespace_node *child; /* First child */
- struct acpi_namespace_node *peer; /* Next peer */
- u8 owner_id; /* Who created this node */
- u8 flags;
-
- /* Fields used by the ASL compiler only */
+ struct acpi_namespace_node *peer; /* Peer. Parent if ANOBJ_END_OF_PEER_LIST set */
-#ifdef ACPI_ASL_COMPILER
- u32 value;
+ /*
+ * The following fields are used by the ASL compiler and disassembler only
+ */
+#ifdef ACPI_LARGE_NAMESPACE_NODE
union acpi_parse_object *op;
+ u32 value;
+ u32 length;
#endif
};
-#define ACPI_ENTRY_NOT_FOUND NULL
+/* Namespace Node flags */
-/* Node flags */
+#define ANOBJ_END_OF_PEER_LIST 0x01 /* End-of-list, Peer field points to parent */
+#define ANOBJ_DATA_WIDTH_32 0x02 /* Parent table uses 32-bit math */
+#define ANOBJ_METHOD_ARG 0x04 /* Node is a method argument */
+#define ANOBJ_METHOD_LOCAL 0x08 /* Node is a method local */
+#define ANOBJ_SUBTREE_HAS_INI 0x10 /* Used to optimize device initialization */
-#define ANOBJ_RESERVED 0x01
-#define ANOBJ_END_OF_PEER_LIST 0x02
-#define ANOBJ_DATA_WIDTH_32 0x04 /* Parent table is 64-bits */
-#define ANOBJ_METHOD_ARG 0x08
-#define ANOBJ_METHOD_LOCAL 0x10
-#define ANOBJ_METHOD_NO_RETVAL 0x20
-#define ANOBJ_METHOD_SOME_NO_RETVAL 0x40
-#define ANOBJ_IS_BIT_OFFSET 0x80
+#define ANOBJ_IS_EXTERNAL 0x08 /* i_aSL only: This object created via External() */
+#define ANOBJ_METHOD_NO_RETVAL 0x10 /* i_aSL only: Method has no return value */
+#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* i_aSL only: Method has at least one return value */
+#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
+#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
/*
* ACPI Table Descriptor. One per ACPI table
@@ -212,8 +226,8 @@ struct acpi_table_desc {
struct acpi_table_header *pointer;
u8 *aml_start;
u64 physical_address;
- u32 aml_length;
acpi_size length;
+ u32 aml_length;
acpi_owner_id owner_id;
u8 type;
u8 allocation;
@@ -276,6 +290,9 @@ struct acpi_create_field_info {
u8 field_type;
};
+typedef
+acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
+
/*
* Bitmapped ACPI types. Used internally only
*/
@@ -377,7 +394,7 @@ struct acpi_gpe_walk_info {
struct acpi_gpe_block_info *gpe_block;
};
-typedef acpi_status(*ACPI_GPE_CALLBACK) (struct acpi_gpe_xrupt_info *
+typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
gpe_xrupt_info,
struct acpi_gpe_block_info *
gpe_block);
@@ -416,13 +433,14 @@ struct acpi_field_info {
#define ACPI_CONTROL_PREDICATE_FALSE 0xC3
#define ACPI_CONTROL_PREDICATE_TRUE 0xC4
-#define ACPI_STATE_COMMON /* Two 32-bit fields and a pointer */\
- u8 data_type; /* To differentiate various internal objs */\
- u8 flags; \
- u16 value; \
- u16 state; \
- u16 reserved; \
- void *next;
+#define ACPI_STATE_COMMON \
+ void *next; \
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; \
+ u16 value; \
+ u16 state;
+
+ /* There are 2 bytes available here until the next natural alignment boundary */
struct acpi_common_state {
ACPI_STATE_COMMON};
@@ -438,12 +456,12 @@ struct acpi_update_state {
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON union acpi_operand_object *source_object;
+ ACPI_STATE_COMMON u16 index;
+ union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
void *this_target_obj;
u32 num_packages;
- u16 index;
};
/*
@@ -451,10 +469,10 @@ struct acpi_pkg_state {
* Allows nesting of these constructs
*/
struct acpi_control_state {
- ACPI_STATE_COMMON union acpi_parse_object *predicate_op;
+ ACPI_STATE_COMMON u16 opcode;
+ union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u16 opcode;
};
/*
@@ -465,11 +483,11 @@ struct acpi_scope_state {
};
struct acpi_pscope_state {
- ACPI_STATE_COMMON union acpi_parse_object *op; /* Current op being parsed */
+ ACPI_STATE_COMMON u32 arg_count; /* Number of fixed arguments */
+ union acpi_parse_object *op; /* Current op being parsed */
u8 *arg_end; /* Current argument end */
u8 *pkg_end; /* Current package end */
u32 arg_list; /* Next argument to parse */
- u32 arg_count; /* Number of fixed arguments */
};
/*
@@ -477,10 +495,10 @@ struct acpi_pscope_state {
* states are created when there are nested control methods executing.
*/
struct acpi_thread_state {
- ACPI_STATE_COMMON struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
+ ACPI_STATE_COMMON u8 current_sync_level; /* Mutex Sync (nested acquire) level */
+ struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */
- u32 thread_id; /* Running thread ID */
- u8 current_sync_level; /* Mutex Sync (nested acquire) level */
+ acpi_thread_id thread_id; /* Running thread ID */
};
/*
@@ -488,10 +506,9 @@ struct acpi_thread_state {
* AML arguments
*/
struct acpi_result_values {
- ACPI_STATE_COMMON
- union acpi_operand_object *obj_desc[ACPI_OBJ_NUM_OPERANDS];
- u8 num_results;
+ ACPI_STATE_COMMON u8 num_results;
u8 last_insert;
+ union acpi_operand_object *obj_desc[ACPI_OBJ_NUM_OPERANDS];
};
typedef
@@ -546,7 +563,7 @@ struct acpi_opcode_info {
#endif
u32 parse_args; /* Grammar/Parse time arguments */
u32 runtime_args; /* Interpret time arguments */
- u32 flags; /* Misc flags */
+ u16 flags; /* Misc flags */
u8 object_type; /* Corresponding internal object type */
u8 class; /* Opcode class */
u8 type; /* Opcode type */
@@ -563,29 +580,31 @@ union acpi_parse_value {
};
#define ACPI_PARSE_COMMON \
- u8 data_type; /* To differentiate various internal objs */\
- u8 flags; /* Type of Op */\
- u16 aml_opcode; /* AML opcode */\
- u32 aml_offset; /* Offset of declaration in AML */\
- union acpi_parse_object *parent; /* Parent op */\
- union acpi_parse_object *next; /* Next op */\
+ union acpi_parse_object *parent; /* Parent op */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 flags; /* Type of Op */\
+ u16 aml_opcode; /* AML opcode */\
+ u32 aml_offset; /* Offset of declaration in AML */\
+ union acpi_parse_object *next; /* Next op */\
+ struct acpi_namespace_node *node; /* For use by interpreter */\
+ union acpi_parse_value value; /* Value or args associated with the opcode */\
ACPI_DISASM_ONLY_MEMBERS (\
- u8 disasm_flags; /* Used during AML disassembly */\
- u8 disasm_opcode; /* Subtype used for disassembly */\
- char aml_op_name[16]) /* Op name (debug only) */\
- /* NON-DEBUG members below: */\
- struct acpi_namespace_node *node; /* For use by interpreter */\
- union acpi_parse_value value; /* Value or args associated with the opcode */
-
-#define ACPI_DASM_BUFFER 0x00
-#define ACPI_DASM_RESOURCE 0x01
-#define ACPI_DASM_STRING 0x02
-#define ACPI_DASM_UNICODE 0x03
-#define ACPI_DASM_EISAID 0x04
-#define ACPI_DASM_MATCHOP 0x05
+ u8 disasm_flags; /* Used during AML disassembly */\
+ u8 disasm_opcode; /* Subtype used for disassembly */\
+ char aml_op_name[16]) /* Op name (debug only) */
+
+#define ACPI_DASM_BUFFER 0x00
+#define ACPI_DASM_RESOURCE 0x01
+#define ACPI_DASM_STRING 0x02
+#define ACPI_DASM_UNICODE 0x03
+#define ACPI_DASM_EISAID 0x04
+#define ACPI_DASM_MATCHOP 0x05
+#define ACPI_DASM_LNOT_PREFIX 0x06
+#define ACPI_DASM_LNOT_SUFFIX 0x07
+#define ACPI_DASM_IGNORE 0x08
/*
- * generic operation (for example: If, While, Store)
+ * Generic operation (for example: If, While, Store)
*/
struct acpi_parse_obj_common {
ACPI_PARSE_COMMON};
@@ -601,7 +620,7 @@ struct acpi_parse_obj_named {
u32 name; /* 4-byte name or zero if no name */
};
-/* The parse node is the fundamental element of the parse tree */
+/* This version is used by the i_aSL compiler only */
#define ACPI_MAX_PARSEOP_NAME 20
@@ -643,7 +662,6 @@ union acpi_parse_object {
* method.
*/
struct acpi_parse_state {
- u32 aml_size;
u8 *aml_start; /* First AML byte */
u8 *aml; /* Next AML byte */
u8 *aml_end; /* (last + 1) AML byte */
@@ -653,22 +671,23 @@ struct acpi_parse_state {
struct acpi_namespace_node *start_node;
union acpi_generic_state *scope; /* Current scope */
union acpi_parse_object *start_scope;
+ u32 aml_size;
};
/* Parse object flags */
-#define ACPI_PARSEOP_GENERIC 0x01
-#define ACPI_PARSEOP_NAMED 0x02
-#define ACPI_PARSEOP_DEFERRED 0x04
-#define ACPI_PARSEOP_BYTELIST 0x08
-#define ACPI_PARSEOP_IN_CACHE 0x80
+#define ACPI_PARSEOP_GENERIC 0x01
+#define ACPI_PARSEOP_NAMED 0x02
+#define ACPI_PARSEOP_DEFERRED 0x04
+#define ACPI_PARSEOP_BYTELIST 0x08
+#define ACPI_PARSEOP_IN_CACHE 0x80
/* Parse object disasm_flags */
-#define ACPI_PARSEOP_IGNORE 0x01
-#define ACPI_PARSEOP_PARAMLIST 0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
-#define ACPI_PARSEOP_SPECIAL 0x10
+#define ACPI_PARSEOP_IGNORE 0x01
+#define ACPI_PARSEOP_PARAMLIST 0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_SPECIAL 0x10
/*****************************************************************************
*
@@ -676,8 +695,8 @@ struct acpi_parse_state {
*
****************************************************************************/
-#define PCI_ROOT_HID_STRING "PNP0A03"
-#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
+#define PCI_ROOT_HID_STRING "PNP0A03"
+#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"
struct acpi_bit_register_info {
u8 parent_register;
@@ -710,13 +729,14 @@ struct acpi_bit_register_info {
#define ACPI_BITMASK_PCIEXP_WAKE_STATUS 0x4000 /* ACPI 3.0 */
#define ACPI_BITMASK_WAKE_STATUS 0x8000
-#define ACPI_BITMASK_ALL_FIXED_STATUS (ACPI_BITMASK_TIMER_STATUS | \
- ACPI_BITMASK_BUS_MASTER_STATUS | \
- ACPI_BITMASK_GLOBAL_LOCK_STATUS | \
- ACPI_BITMASK_POWER_BUTTON_STATUS | \
- ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
- ACPI_BITMASK_RT_CLOCK_STATUS | \
- ACPI_BITMASK_WAKE_STATUS)
+#define ACPI_BITMASK_ALL_FIXED_STATUS (\
+ ACPI_BITMASK_TIMER_STATUS | \
+ ACPI_BITMASK_BUS_MASTER_STATUS | \
+ ACPI_BITMASK_GLOBAL_LOCK_STATUS | \
+ ACPI_BITMASK_POWER_BUTTON_STATUS | \
+ ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
+ ACPI_BITMASK_RT_CLOCK_STATUS | \
+ ACPI_BITMASK_WAKE_STATUS)
#define ACPI_BITMASK_TIMER_ENABLE 0x0001
#define ACPI_BITMASK_GLOBAL_LOCK_ENABLE 0x0020
@@ -820,7 +840,7 @@ struct acpi_bit_register_info {
*
****************************************************************************/
-#define ACPI_ASCII_ZERO 0x30
+#define ACPI_ASCII_ZERO 0x30
/*****************************************************************************
*
@@ -842,9 +862,9 @@ struct acpi_integrity_info {
u32 objects;
};
-#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
-#define ACPI_DB_CONSOLE_OUTPUT 0x02
-#define ACPI_DB_DUPLICATE_OUTPUT 0x03
+#define ACPI_DB_REDIRECTABLE_OUTPUT 0x01
+#define ACPI_DB_CONSOLE_OUTPUT 0x02
+#define ACPI_DB_DUPLICATE_OUTPUT 0x03
/*****************************************************************************
*
@@ -854,18 +874,18 @@ struct acpi_integrity_info {
/* Entry for a memory allocation (debug only) */
-#define ACPI_MEM_MALLOC 0
-#define ACPI_MEM_CALLOC 1
-#define ACPI_MAX_MODULE_NAME 16
+#define ACPI_MEM_MALLOC 0
+#define ACPI_MEM_CALLOC 1
+#define ACPI_MAX_MODULE_NAME 16
#define ACPI_COMMON_DEBUG_MEM_HEADER \
- struct acpi_debug_mem_block *previous; \
- struct acpi_debug_mem_block *next; \
- u32 size; \
- u32 component; \
- u32 line; \
- char module[ACPI_MAX_MODULE_NAME]; \
- u8 alloc_type;
+ struct acpi_debug_mem_block *previous; \
+ struct acpi_debug_mem_block *next; \
+ u32 size; \
+ u32 component; \
+ u32 line; \
+ char module[ACPI_MAX_MODULE_NAME]; \
+ u8 alloc_type;
struct acpi_debug_mem_header {
ACPI_COMMON_DEBUG_MEM_HEADER};
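Note: several headers above (struct acpi_namespace_node, ACPI_STATE_COMMON,
ACPI_PARSE_COMMON) are reordered to put pointers first so that narrow fields share
alignment slots on 64-bit builds, per the "20 bytes / 32 bytes" comment in the node
definition. A hedged illustration of why field order matters; the struct names here
are hypothetical and not from the patch.

    /* Hedged sketch -- padding cost of interleaving narrow fields with pointers. */
    struct interleaved {
            u8   kind;          /* 1 byte + 7 bytes of padding before the pointer */
            void *next;         /* 8 bytes */
            u16  state;         /* 2 bytes + 6 bytes of tail padding */
    };                          /* typically 24 bytes on LP64 */

    struct pointer_first {
            void *next;         /* 8 bytes */
            u8   kind;          /* narrow fields pack into one 8-byte slot */
            u16  state;
    };                          /* typically 16 bytes on LP64 */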
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index f2be2a88173..38f9aa4bef0 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -56,6 +56,10 @@
#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
#define ACPI_MIN(a,b) (((a)<(b))?(a):(b))
+/* Size calculation */
+
+#define ACPI_ARRAY_LENGTH(x) (sizeof(x) / sizeof((x)[0]))
+
#if ACPI_MACHINE_WIDTH == 16
/*
@@ -99,7 +103,7 @@
* printf() format helpers
*/
-/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
+/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i),ACPI_LODWORD(i)
@@ -130,7 +134,6 @@
#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void,(void *) NULL,(acpi_native_uint) i)
#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p,(void *) NULL)
#define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL)
-#define ACPI_FADT_OFFSET(f) ACPI_OFFSET (FADT_DESCRIPTOR, f)
#if ACPI_MACHINE_WIDTH == 16
#define ACPI_STORE_POINTER(d,s) ACPI_MOVE_32_TO_32(d,s)
@@ -141,6 +144,12 @@
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
#endif
+#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
+#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32,(a)) == *ACPI_CAST_PTR (u32,(b)))
+#else
+#define ACPI_COMPARE_NAME(a,b) (!ACPI_STRNCMP (ACPI_CAST_PTR (char,(a)), ACPI_CAST_PTR (char,(b)), ACPI_NAME_SIZE))
+#endif
+
/*
* Macros for moving data around to/from buffers that are possibly unaligned.
* If the hardware supports the transfer of unaligned data, just do the store.
@@ -341,29 +350,33 @@
/*
* Rounding macros (Power of two boundaries only)
*/
-#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & \
+#define ACPI_ROUND_DOWN(value,boundary) (((acpi_native_uint)(value)) & \
(~(((acpi_native_uint) boundary)-1)))
-#define ACPI_ROUND_UP(value,boundary) ((((acpi_native_uint)(value)) + \
+#define ACPI_ROUND_UP(value,boundary) ((((acpi_native_uint)(value)) + \
(((acpi_native_uint) boundary)-1)) & \
(~(((acpi_native_uint) boundary)-1)))
-#define ACPI_ROUND_DOWN_TO_32_BITS(a) ACPI_ROUND_DOWN(a,4)
-#define ACPI_ROUND_DOWN_TO_64_BITS(a) ACPI_ROUND_DOWN(a,8)
-#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a) ACPI_ROUND_DOWN(a,ALIGNED_ADDRESS_BOUNDARY)
+/* Note: sizeof(acpi_native_uint) evaluates to either 2, 4, or 8 */
+
+#define ACPI_ROUND_DOWN_TO_32BIT(a) ACPI_ROUND_DOWN(a,4)
+#define ACPI_ROUND_DOWN_TO_64BIT(a) ACPI_ROUND_DOWN(a,8)
+#define ACPI_ROUND_DOWN_TO_NATIVE_WORD(a) ACPI_ROUND_DOWN(a,sizeof(acpi_native_uint))
-#define ACPI_ROUND_UP_to_32_bITS(a) ACPI_ROUND_UP(a,4)
-#define ACPI_ROUND_UP_to_64_bITS(a) ACPI_ROUND_UP(a,8)
-#define ACPI_ROUND_UP_TO_NATIVE_WORD(a) ACPI_ROUND_UP(a,ALIGNED_ADDRESS_BOUNDARY)
+#define ACPI_ROUND_UP_TO_32BIT(a) ACPI_ROUND_UP(a,4)
+#define ACPI_ROUND_UP_TO_64BIT(a) ACPI_ROUND_UP(a,8)
+#define ACPI_ROUND_UP_TO_NATIVE_WORD(a) ACPI_ROUND_UP(a,sizeof(acpi_native_uint))
-#define ACPI_ROUND_BITS_UP_TO_BYTES(a) ACPI_DIV_8((a) + 7)
-#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a) ACPI_DIV_8((a))
+#define ACPI_ROUND_BITS_UP_TO_BYTES(a) ACPI_DIV_8((a) + 7)
+#define ACPI_ROUND_BITS_DOWN_TO_BYTES(a) ACPI_DIV_8((a))
-#define ACPI_ROUND_UP_TO_1K(a) (((a) + 1023) >> 10)
+#define ACPI_ROUND_UP_TO_1K(a) (((a) + 1023) >> 10)
/* Generic (non-power-of-two) rounding */
-#define ACPI_ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary))
+#define ACPI_ROUND_UP_TO(value,boundary) (((value) + ((boundary)-1)) / (boundary))
+
+#define ACPI_IS_MISALIGNED(value) (((acpi_native_uint)value) & (sizeof(acpi_native_uint)-1))
/*
* Bitmask creation
@@ -371,10 +384,10 @@
* MASK_BITS_ABOVE creates a mask starting AT the position and above
* MASK_BITS_BELOW creates a mask starting one bit BELOW the position
*/
-#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_INTEGER_MAX) << ((u32) (position))))
-#define ACPI_MASK_BITS_BELOW(position) ((ACPI_INTEGER_MAX) << ((u32) (position)))
+#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_INTEGER_MAX) << ((u32) (position))))
+#define ACPI_MASK_BITS_BELOW(position) ((ACPI_INTEGER_MAX) << ((u32) (position)))
-#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
+#define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7'))
/* Bitfields within ACPI registers */
@@ -396,8 +409,8 @@
*
* The "Descriptor" field is the first field in both structures.
*/
-#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->descriptor_id)
-#define ACPI_SET_DESCRIPTOR_TYPE(d,t) (((union acpi_descriptor *)(void *)(d))->descriptor_id = t)
+#define ACPI_GET_DESCRIPTOR_TYPE(d) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
+#define ACPI_SET_DESCRIPTOR_TYPE(d,t) (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
/* Macro to test the object type */
@@ -486,7 +499,6 @@
#define ACPI_ERROR(plist)
#define ACPI_ERROR_NAMESPACE(s,e)
#define ACPI_ERROR_METHOD(s,n,p,e)
-
#endif
/*
@@ -514,12 +526,12 @@
#define ACPI_GET_FUNCTION_NAME _acpi_function_name
/*
* The Name parameter should be the procedure name as a quoted string.
- * This is declared as a local string ("my_function_name") so that it can
+ * This is declared as a local string ("MyFunctionName") so that it can
* be also used by the function exit macros below.
* Note: (const char) is used to be compatible with the debug interfaces
* and macros such as __FUNCTION__.
*/
-#define ACPI_FUNCTION_NAME(name) const char *_acpi_function_name = name;
+#define ACPI_FUNCTION_NAME(name) const char *_acpi_function_name = #name;
#else
/* Compiler supports __FUNCTION__ (or equivalent) -- Ignore this macro */
@@ -528,13 +540,13 @@
#endif
#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
+ acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
#define ACPI_FUNCTION_TRACE_PTR(a,b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS,(void *)b)
+ acpi_ut_trace_ptr(ACPI_DEBUG_PARAMETERS,(void *)b)
#define ACPI_FUNCTION_TRACE_U32(a,b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS,(u32)b)
+ acpi_ut_trace_u32(ACPI_DEBUG_PARAMETERS,(u32)b)
#define ACPI_FUNCTION_TRACE_STR(a,b) ACPI_FUNCTION_NAME(a) \
- acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS,(char *)b)
+ acpi_ut_trace_str(ACPI_DEBUG_PARAMETERS,(char *)b)
#define ACPI_FUNCTION_ENTRY() acpi_ut_track_stack_ptr()
@@ -543,7 +555,7 @@
* WARNING: These macros include a return statement. This is usually considered
* bad form, but having a separate exit macro is very ugly and difficult to maintain.
* One of the FUNCTION_TRACE macros above must be used in conjunction with these macros
- * so that "_acpi_function_name" is defined.
+ * so that "_AcpiFunctionName" is defined.
*
* Note: the DO_WHILE0 macro is used to prevent some compilers from complaining
* about these constructs.
@@ -654,6 +666,7 @@
#define ACPI_DUMP_STACK_ENTRY(a)
#define ACPI_DUMP_OPERANDS(a,b,c,d,e)
#define ACPI_DUMP_ENTRY(a,b)
+#define ACPI_DUMP_TABLES(a,b)
#define ACPI_DUMP_PATHNAME(a,b,c,d)
#define ACPI_DUMP_RESOURCE_LIST(a)
#define ACPI_DUMP_BUFFER(a,b)
@@ -709,19 +722,19 @@
/* Memory allocation */
-#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_FREE(a) acpi_os_free(a)
+#define ACPI_ALLOCATE(a) acpi_ut_allocate((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_FREE(a) acpi_os_free(a)
#define ACPI_MEM_TRACKING(a)
#else
/* Memory allocation */
-#define ACPI_MEM_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_CALLOCATE(a) acpi_ut_callocate_and_track((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_FREE(a) acpi_ut_free_and_track(a,_COMPONENT,_acpi_module_name,__LINE__)
-#define ACPI_MEM_TRACKING(a) a
+#define ACPI_ALLOCATE(a) acpi_ut_allocate_and_track((acpi_size)(a),_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_ALLOCATE_ZEROED(a) acpi_ut_allocate_zeroed_and_track((acpi_size)(a), _COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_FREE(a) acpi_ut_free_and_track(a,_COMPONENT,_acpi_module_name,__LINE__)
+#define ACPI_MEM_TRACKING(a) a
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
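Note: the rounding macros above now derive the native word size from
sizeof(acpi_native_uint) instead of ALIGNED_ADDRESS_BOUNDARY, and ACPI_IS_MISALIGNED
tests against the same width. A hedged worked example, assuming an 8-byte
acpi_native_uint (LP64):

    /* Hedged sketch -- expected results of the power-of-two rounding macros. */
    ACPI_ROUND_DOWN(0x1007, 8);         /* 0x1007 & ~7       == 0x1000 */
    ACPI_ROUND_UP(0x1001, 8);           /* (0x1001 + 7) & ~7 == 0x1008 */
    ACPI_ROUND_UP_TO_NATIVE_WORD(13);   /* rounds to sizeof(acpi_native_uint): 16 */
    ACPI_IS_MISALIGNED(0x1003);         /* nonzero: 0x1003 & 7 == 3 */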
diff --git a/include/acpi/acnamesp.h b/include/acpi/acnamesp.h
index b667a804fc8..83b52f9f899 100644
--- a/include/acpi/acnamesp.h
+++ b/include/acpi/acnamesp.h
@@ -63,6 +63,8 @@
#define ACPI_NS_DONT_OPEN_SCOPE 0x02
#define ACPI_NS_NO_PEER_SEARCH 0x04
#define ACPI_NS_ERROR_IF_FOUND 0x08
+#define ACPI_NS_PREFIX_IS_SCOPE 0x10
+#define ACPI_NS_EXTERNAL 0x20
#define ACPI_NS_WALK_UNLOCK TRUE
#define ACPI_NS_WALK_NO_UNLOCK FALSE
@@ -171,19 +173,17 @@ acpi_ns_dump_objects(acpi_object_type type,
/*
* nseval - Namespace evaluation functions
*/
-acpi_status acpi_ns_evaluate_by_handle(struct acpi_parameter_info *info);
-
-acpi_status
-acpi_ns_evaluate_by_name(char *pathname, struct acpi_parameter_info *info);
-
-acpi_status
-acpi_ns_evaluate_relative(char *pathname, struct acpi_parameter_info *info);
+acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info);
/*
* nsnames - Name and Scope manipulation
*/
u32 acpi_ns_opens_scope(acpi_object_type type);
+void
+acpi_ns_build_external_path(struct acpi_namespace_node *node,
+ acpi_size size, char *name_buffer);
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
@@ -196,9 +196,9 @@ u8
acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
acpi_status
-acpi_ns_get_node_by_path(char *external_pathname,
- struct acpi_namespace_node *in_prefix_node,
- u32 flags, struct acpi_namespace_node **out_node);
+acpi_ns_get_node(struct acpi_namespace_node *prefix_node,
+ char *external_pathname,
+ u32 flags, struct acpi_namespace_node **out_node);
acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node);
@@ -241,10 +241,10 @@ acpi_ns_search_and_enter(u32 entry_name,
u32 flags, struct acpi_namespace_node **ret_node);
acpi_status
-acpi_ns_search_node(u32 entry_name,
- struct acpi_namespace_node *node,
- acpi_object_type type,
- struct acpi_namespace_node **ret_node);
+acpi_ns_search_one_scope(u32 entry_name,
+ struct acpi_namespace_node *node,
+ acpi_object_type type,
+ struct acpi_namespace_node **ret_node);
void
acpi_ns_install_node(struct acpi_walk_state *walk_state,
diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h
index d130cfed8d5..1747d94084d 100644
--- a/include/acpi/acobject.h
+++ b/include/acpi/acobject.h
@@ -1,7 +1,7 @@
/******************************************************************************
*
- * Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
+ * Name: acobject.h - Definition of union acpi_operand_object (Internal object only)
*
*****************************************************************************/
@@ -45,10 +45,12 @@
#ifndef _ACOBJECT_H
#define _ACOBJECT_H
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
/*
- * The union acpi_operand_object is used to pass AML operands from the dispatcher
+ * The union acpi_operand_object is used to pass AML operands from the dispatcher
* to the interpreter, and to keep track of the various handlers such as
- * address space handlers and notify handlers. The object is a constant
+ * address space handlers and notify handlers. The object is a constant
* size in order to allow it to be cached and reused.
*/
@@ -61,17 +63,25 @@
/*
* Common area for all objects.
*
- * data_type is used to differentiate between internal descriptors, and MUST
- * be the first byte in this structure.
+ * descriptor_type is used to differentiate between internal descriptors, and
+ * must be in the same place across all descriptors
+ *
+ * Note: The descriptor_type and Type fields must appear in the identical
+ * position in both the struct acpi_namespace_node and union acpi_operand_object
+ * structures.
*/
-#define ACPI_OBJECT_COMMON_HEADER /* SIZE/ALIGNMENT: 32 bits, one ptr plus trailing 8-bit flag */\
- u8 descriptor; /* To differentiate various internal objs */\
- u8 type; /* acpi_object_type */\
- u16 reference_count; /* For object deletion management */\
- union acpi_operand_object *next_object; /* Objects linked to parent NS node */\
- u8 flags;
-
-/* Values for flag byte above */
+#define ACPI_OBJECT_COMMON_HEADER \
+ union acpi_operand_object *next_object; /* Objects linked to parent NS node */\
+ u8 descriptor_type; /* To differentiate various internal objs */\
+ u8 type; /* acpi_object_type */\
+ u16 reference_count; /* For object deletion management */\
+ u8 flags;
+ /*
+ * Note: There are 3 bytes available here before the
+ * next natural alignment boundary (for both 32/64 cases)
+ */
+
+/* Values for Flag byte above */
#define AOPOBJ_AML_CONSTANT 0x01
#define AOPOBJ_STATIC_POINTER 0x02
@@ -79,36 +89,7 @@
#define AOPOBJ_OBJECT_INITIALIZED 0x08
#define AOPOBJ_SETUP_COMPLETE 0x10
#define AOPOBJ_SINGLE_DATUM 0x20
-
-/*
- * Common bitfield for the field objects
- * "Field Datum" -- a datum from the actual field object
- * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field
- */
-#define ACPI_COMMON_FIELD_INFO /* SIZE/ALIGNMENT: 24 bits + three 32-bit values */\
- u8 field_flags; /* Access, update, and lock bits */\
- u8 attribute; /* From access_as keyword */\
- u8 access_byte_width; /* Read/Write size in bytes */\
- u32 bit_length; /* Length of field in bits */\
- u32 base_byte_offset; /* Byte offset within containing object */\
- u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
- u8 access_bit_width; /* Read/Write size in bits (8-64) */\
- u32 value; /* Value to store into the Bank or Index register */\
- struct acpi_namespace_node *node; /* Link back to parent node */
-
-/*
- * Fields common to both Strings and Buffers
- */
-#define ACPI_COMMON_BUFFER_INFO \
- u32 length;
-
-/*
- * Common fields for objects that support ASL notifications
- */
-#define ACPI_COMMON_NOTIFY_INFO \
- union acpi_operand_object *system_notify; /* Handler for system notifies */\
- union acpi_operand_object *device_notify; /* Handler for driver notifies */\
- union acpi_operand_object *handler; /* Handler for Address space */
+#define AOPOBJ_INVALID 0x40 /* Used if host OS won't allow an op_region address */
/******************************************************************************
*
@@ -125,25 +106,31 @@ struct acpi_object_integer {
/*
* Note: The String and Buffer object must be identical through the Pointer
- * element. There is code that depends on this.
+ * and length elements. There is code that depends on this.
+ *
+ * Fields common to both Strings and Buffers
*/
+#define ACPI_COMMON_BUFFER_INFO(_type) \
+ _type *pointer; \
+ u32 length;
+
struct acpi_object_string { /* Null terminated, ASCII characters only */
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO char *pointer; /* String in AML stream or allocated string */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
};
struct acpi_object_buffer {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO u8 * pointer; /* Buffer in AML stream or allocated buffer */
- struct acpi_namespace_node *node; /* Link back to parent node */
- u8 *aml_start;
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(u8) /* Buffer in AML stream or allocated buffer */
u32 aml_length;
+ u8 *aml_start;
+ struct acpi_namespace_node *node; /* Link back to parent node */
};
struct acpi_object_package {
- ACPI_OBJECT_COMMON_HEADER u32 count; /* # of elements in package */
- u32 aml_length;
- u8 *aml_start;
- struct acpi_namespace_node *node; /* Link back to parent node */
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Link back to parent node */
union acpi_operand_object **elements; /* Array of pointers to acpi_objects */
+ u8 *aml_start;
+ u32 aml_length;
+ u32 count; /* # of elements in package */
};
/******************************************************************************
@@ -156,23 +143,6 @@ struct acpi_object_event {
ACPI_OBJECT_COMMON_HEADER void *semaphore;
};
-#define ACPI_INFINITE_CONCURRENCY 0xFF
-
-typedef
-acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
-
-struct acpi_object_method {
- ACPI_OBJECT_COMMON_HEADER u8 method_flags;
- u8 param_count;
- u32 aml_length;
- void *semaphore;
- u8 *aml_start;
- ACPI_INTERNAL_METHOD implementation;
- u8 concurrency;
- u8 thread_count;
- acpi_owner_id owner_id;
-};
-
struct acpi_object_mutex {
ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
u16 acquisition_depth; /* Allow multiple Acquires, same thread */
@@ -186,11 +156,23 @@ struct acpi_object_mutex {
struct acpi_object_region {
ACPI_OBJECT_COMMON_HEADER u8 space_id;
- union acpi_operand_object *handler; /* Handler for region access */
struct acpi_namespace_node *node; /* Containing namespace node */
+ union acpi_operand_object *handler; /* Handler for region access */
union acpi_operand_object *next;
- u32 length;
acpi_physical_address address;
+ u32 length;
+};
+
+struct acpi_object_method {
+ ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+ u8 param_count;
+ u8 concurrency;
+ void *semaphore;
+ u8 *aml_start;
+ ACPI_INTERNAL_METHOD implementation;
+ u32 aml_length;
+ u8 thread_count;
+ acpi_owner_id owner_id;
};
/******************************************************************************
@@ -199,6 +181,14 @@ struct acpi_object_region {
*
*****************************************************************************/
+/*
+ * Common fields for objects that support ASL notifications
+ */
+#define ACPI_COMMON_NOTIFY_INFO \
+ union acpi_operand_object *system_notify; /* Handler for system notifies */\
+ union acpi_operand_object *device_notify; /* Handler for driver notifies */\
+ union acpi_operand_object *handler; /* Handler for Address space */
+
struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
@@ -213,9 +203,9 @@ struct acpi_object_power_resource {
};
struct acpi_object_processor {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO u32 proc_id;
- u32 length;
- acpi_io_address address;
+ ACPI_OBJECT_COMMON_HEADER u8 proc_id;
+ u8 length;
+ ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
};
struct acpi_object_thermal_zone {
@@ -227,9 +217,24 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
*
*****************************************************************************/
+/*
+ * Common bitfield for the field objects
+ * "Field Datum" -- a datum from the actual field object
+ * "Buffer Datum" -- a datum from a user buffer, read from or to be written to the field
+ */
+#define ACPI_COMMON_FIELD_INFO \
+ u8 field_flags; /* Access, update, and lock bits */\
+ u8 attribute; /* From access_as keyword */\
+ u8 access_byte_width; /* Read/Write size in bytes */\
+ struct acpi_namespace_node *node; /* Link back to parent node */\
+ u32 bit_length; /* Length of field in bits */\
+ u32 base_byte_offset; /* Byte offset within containing object */\
+ u32 value; /* Value to store into the Bank or Index register */\
+ u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+ u8 access_bit_width; /* Read/Write size in bits (8-64) */
+
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing Operation Region object */
- /* (REGION/BANK fields only) */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
struct acpi_object_region_field {
@@ -244,7 +249,7 @@ struct acpi_object_bank_field {
struct acpi_object_index_field {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO
/*
- * No "region_obj" pointer needed since the Index and Data registers
+ * No "RegionObj" pointer needed since the Index and Data registers
* are each field definitions unto themselves.
*/
union acpi_operand_object *index_obj; /* Index register */
@@ -269,13 +274,9 @@ struct acpi_object_notify_handler {
void *context;
};
-/* Flags for address handler */
-
-#define ACPI_ADDR_HANDLER_DEFAULT_INSTALLED 0x1
-
struct acpi_object_addr_handler {
ACPI_OBJECT_COMMON_HEADER u8 space_id;
- u16 hflags;
+ u8 handler_flags;
acpi_adr_space_handler handler;
struct acpi_namespace_node *node; /* Parent device */
void *context;
@@ -284,6 +285,10 @@ struct acpi_object_addr_handler {
union acpi_operand_object *next;
};
+/* Flags for address handler (handler_flags) */
+
+#define ACPI_ADDR_HANDLER_DEFAULT_INSTALLED 0x01
+
/******************************************************************************
*
* Special internal objects
@@ -297,10 +302,10 @@ struct acpi_object_addr_handler {
struct acpi_object_reference {
ACPI_OBJECT_COMMON_HEADER u8 target_type; /* Used for index_op */
u16 opcode;
- u32 offset; /* Used for arg_op, local_op, and index_op */
- void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
+ void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node;
union acpi_operand_object **where;
+ u32 offset; /* Used for arg_op, local_op, and index_op */
};
/*
@@ -311,12 +316,10 @@ struct acpi_object_reference {
* Currently: Region and field_unit types
*/
struct acpi_object_extra {
- ACPI_OBJECT_COMMON_HEADER u8 byte_fill1;
- u16 word_fill1;
- u32 aml_length;
- u8 *aml_start;
- struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
void *region_context; /* Region-specific data */
+ u8 *aml_start;
+ u32 aml_length;
};
/* Additional data that can be attached to namespace nodes */
@@ -391,8 +394,13 @@ union acpi_operand_object {
#define ACPI_DESC_TYPE_NAMED 0x0F
#define ACPI_DESC_TYPE_MAX 0x0F
+struct acpi_common_descriptor {
+ void *common_pointer;
+ u8 descriptor_type; /* To differentiate various internal objs */
+};
+
union acpi_descriptor {
- u8 descriptor_id; /* To differentiate various internal objs */
+ struct acpi_common_descriptor common;
union acpi_operand_object object;
struct acpi_namespace_node node;
union acpi_parse_object op;
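With the new common sub-structure, code that only needs the type byte can go through the named member instead of overlaying a bare u8. A minimal sketch (the accessor itself is illustrative, not part of this patch):

static u8 get_descriptor_type(union acpi_descriptor *desc)
{
	/* descriptor_type tells operand objects, namespace nodes and
	 * parse objects apart when they share an allocation. */
	return desc->common.descriptor_type;
}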
diff --git a/include/acpi/acopcode.h b/include/acpi/acopcode.h
index e6d78bd9e90..7659a46bc43 100644
--- a/include/acpi/acopcode.h
+++ b/include/acpi/acopcode.h
@@ -94,7 +94,7 @@
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
#define ARGP_CONTINUE_OP ARG_NONE
-#define ARGP_COPY_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SIMPLENAME)
+#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_CREATE_BYTE_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
#define ARGP_CREATE_DWORD_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 7785d481dc3..8d5039d0b43 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -50,7 +50,7 @@
* component basis and a per-exception-type basis.
*/
-/* Component IDs are used in the global "debug_layer" */
+/* Component IDs are used in the global "DebugLayer" */
#define ACPI_UTILITIES 0x00000001
#define ACPI_HARDWARE 0x00000002
@@ -121,7 +121,7 @@
#define ACPI_LV_INTERRUPTS 0x08000000
#define ACPI_LV_VERBOSITY3 0x0F000000 | ACPI_LV_VERBOSITY2
-/* Exceptionally verbose output -- also used in the global "debug_level" */
+/* Exceptionally verbose output -- also used in the global "DebugLevel" */
#define ACPI_LV_AML_DISASSEMBLE 0x10000000
#define ACPI_LV_VERBOSE_INFO 0x20000000
@@ -135,7 +135,7 @@
*/
#define ACPI_DEBUG_LEVEL(dl) (u32) dl,ACPI_DEBUG_PARAMETERS
-/* Exception level -- used in the global "debug_level" */
+/* Exception level -- used in the global "DebugLevel" */
#define ACPI_DB_INIT ACPI_DEBUG_LEVEL (ACPI_LV_INIT)
#define ACPI_DB_DEBUG_OBJECT ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
@@ -144,13 +144,13 @@
/*
* These two levels are essentially obsolete, all instances in the
- * ACPICA core code have been replaced by REPORT_ERROR and REPORT_WARNING
+ * ACPICA core code have been replaced by ACPI_ERROR and ACPI_WARNING
* (Kept here because some drivers may still use them)
*/
#define ACPI_DB_ERROR ACPI_DEBUG_LEVEL (ACPI_LV_ERROR)
#define ACPI_DB_WARN ACPI_DEBUG_LEVEL (ACPI_LV_WARN)
-/* Trace level -- also used in the global "debug_level" */
+/* Trace level -- also used in the global "DebugLevel" */
#define ACPI_DB_INIT_NAMES ACPI_DEBUG_LEVEL (ACPI_LV_INIT_NAMES)
#define ACPI_DB_THREADS ACPI_DEBUG_LEVEL (ACPI_LV_THREADS)
diff --git a/include/acpi/acparser.h b/include/acpi/acparser.h
index 5a1ff484af3..9d49d3c41cd 100644
--- a/include/acpi/acparser.h
+++ b/include/acpi/acparser.h
@@ -46,7 +46,7 @@
#define OP_HAS_RETURN_VALUE 1
-/* variable # arguments */
+/* Variable number of arguments. This field must be 32 bits */
#define ACPI_VAR_ARGS ACPI_UINT32_MAX
@@ -71,7 +71,7 @@
/*
* psxface - Parser external interfaces
*/
-acpi_status acpi_ps_execute_method(struct acpi_parameter_info *info);
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info);
/*
* psargs - Parse AML opcode arguments
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 6dca3d54208..a2b3e390a50 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -26,7 +26,7 @@
#ifndef __ACPI_BUS_H__
#define __ACPI_BUS_H__
-#include <linux/kobject.h>
+#include <linux/device.h>
#include <acpi/acpi.h>
@@ -59,7 +59,7 @@ acpi_evaluate_reference(acpi_handle handle,
#define ACPI_BUS_FILE_ROOT "acpi"
extern struct proc_dir_entry *acpi_root_dir;
-extern FADT_DESCRIPTOR acpi_fadt;
+extern struct fadt_descriptor acpi_fadt;
enum acpi_bus_removal_type {
ACPI_BUS_REMOVAL_NORMAL = 0,
@@ -169,7 +169,8 @@ struct acpi_device_flags {
u32 power_manageable:1;
u32 performance_manageable:1;
u32 wake_capable:1; /* Wakeup(_PRW) supported? */
- u32 reserved:20;
+ u32 force_power_state:1;
+ u32 reserved:19;
};
/* File System */
@@ -296,6 +297,7 @@ struct acpi_device {
struct acpi_driver *driver;
void *driver_data;
struct kobject kobj;
+ struct device dev;
};
#define acpi_driver_data(d) ((d)->driver_data)
@@ -327,7 +329,7 @@ int acpi_bus_set_power(acpi_handle handle, int state);
int acpi_bus_generate_event(struct acpi_device *device, u8 type, int data);
int acpi_bus_receive_event(struct acpi_bus_event *event);
int acpi_bus_register_driver(struct acpi_driver *driver);
-int acpi_bus_unregister_driver(struct acpi_driver *driver);
+void acpi_bus_unregister_driver(struct acpi_driver *driver);
int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent,
acpi_handle handle, int type);
int acpi_bus_trim(struct acpi_device *start, int rmdevice);
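Because acpi_bus_unregister_driver() now returns void, driver exit paths have no status to propagate. A hypothetical teardown, assuming an example_driver defined elsewhere in the driver:

static struct acpi_driver example_driver /* = { ... } */;

static void __exit example_exit(void)
{
	/* Unregistration can no longer fail, so just call it and return. */
	acpi_bus_unregister_driver(&example_driver);
}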
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
new file mode 100644
index 00000000000..1049f2a0a6d
--- /dev/null
+++ b/include/acpi/acpi_numa.h
@@ -0,0 +1,23 @@
+#ifndef __ACPI_NUMA_H
+#define __ACPI_NUMA_H
+
+#ifdef CONFIG_ACPI_NUMA
+#include <linux/kernel.h>
+
+/* Proximity bitmap length */
+#if MAX_NUMNODES > 256
+#define MAX_PXM_DOMAINS MAX_NUMNODES
+#else
+#define MAX_PXM_DOMAINS (256) /* Old pxm spec defined it as 8 bits */
+#endif
+
+extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
+extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
+
+extern int __cpuinit pxm_to_node(int);
+extern int __cpuinit node_to_pxm(int);
+extern int __cpuinit acpi_map_pxm_to_node(int);
+extern void __cpuinit acpi_unmap_pxm_to_node(int);
+
+#endif /* CONFIG_ACPI_NUMA */
+#endif /* __ACPI_NUMA_H */
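A hedged sketch of how these helpers are meant to be used when a SRAT proximity domain is first seen (the wrapper is illustrative, not part of this header):

static int __cpuinit srat_pxm_to_logical_node(int pxm)
{
	/* The first reference allocates (or reuses) a logical node id for
	 * this proximity domain; later lookups can use pxm_to_node(pxm). */
	return acpi_map_pxm_to_node(pxm);
}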
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 970e9a6372c..8f473c83b7c 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -50,12 +50,16 @@
#include "platform/acenv.h"
#include "actypes.h"
-/* Priorities for acpi_os_queue_for_execution */
+/* Types for acpi_os_execute */
-#define OSD_PRIORITY_GPE 1
-#define OSD_PRIORITY_HIGH 2
-#define OSD_PRIORITY_MED 3
-#define OSD_PRIORITY_LO 4
+typedef enum {
+ OSL_GLOBAL_LOCK_HANDLER,
+ OSL_NOTIFY_HANDLER,
+ OSL_GPE_HANDLER,
+ OSL_DEBUGGER_THREAD,
+ OSL_EC_POLL_HANDLER,
+ OSL_EC_BURST_HANDLER
+} acpi_execute_type;
#define ACPI_NO_UNIT_LIMIT ((u32) -1)
#define ACPI_MUTEX_SEM 1
@@ -161,13 +165,11 @@ acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler service_routine);
/*
* Threads and Scheduling
*/
-u32 acpi_os_get_thread_id(void);
+acpi_thread_id acpi_os_get_thread_id(void);
acpi_status
-acpi_os_queue_for_execution(u32 priority,
- acpi_osd_exec_callback function, void *context);
-
-void acpi_os_wait_events_complete(void *context);
+acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context);
void acpi_os_wait_events_complete(void *context);
@@ -214,6 +216,12 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
/*
* Miscellaneous
*/
+acpi_status acpi_os_validate_interface(char *interface);
+
+acpi_status
+acpi_os_validate_address(u8 space_id,
+ acpi_physical_address address, acpi_size length);
+
u8 acpi_os_readable(void *pointer, acpi_size length);
#ifdef ACPI_FUTURE_USAGE
@@ -255,11 +263,4 @@ char *acpi_os_get_next_filename(void *dir_handle);
void acpi_os_close_directory(void *dir_handle);
-/*
- * Debug
- */
-void
-acpi_os_dbg_assert(void *failed_assertion,
- void *file_name, u32 line_number, char *message);
-
#endif /* __ACPIOSXF_H__ */
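For callers, the switch from numeric priorities to execution types is mostly mechanical: the caller now states what kind of work is being deferred and the OSL decides how to run it. A minimal migration sketch (the wrapper name is illustrative):

/* Before: acpi_os_queue_for_execution(OSD_PRIORITY_HIGH, func, context); */
static acpi_status defer_notify(acpi_osd_exec_callback func, void *context)
{
	return acpi_os_execute(OSL_NOTIFY_HANDLER, func, context);
}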
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 66cf2ecef57..049e9aa1b86 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -268,7 +268,7 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device);
* Resource interfaces
*/
typedef
-acpi_status(*ACPI_WALK_RESOURCE_CALLBACK) (struct acpi_resource * resource,
+acpi_status(*acpi_walk_resource_callback) (struct acpi_resource * resource,
void *context);
acpi_status
@@ -290,7 +290,7 @@ acpi_get_possible_resources(acpi_handle device_handle,
acpi_status
acpi_walk_resources(acpi_handle device_handle,
char *name,
- ACPI_WALK_RESOURCE_CALLBACK user_function, void *context);
+ acpi_walk_resource_callback user_function, void *context);
acpi_status
acpi_set_current_resources(acpi_handle device_handle,
diff --git a/include/acpi/acresrc.h b/include/acpi/acresrc.h
index fa02e808338..ad11fc13fbe 100644
--- a/include/acpi/acresrc.h
+++ b/include/acpi/acresrc.h
@@ -164,23 +164,26 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
/*
* rsutils
*/
+
acpi_status
-acpi_rs_get_prt_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
+acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
acpi_status
-acpi_rs_get_crs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
+acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
-#ifdef ACPI_FUTURE_USAGE
acpi_status
-acpi_rs_get_prs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
-#endif /* ACPI_FUTURE_USAGE */
+acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
char *path, struct acpi_buffer *ret_buffer);
acpi_status
-acpi_rs_set_srs_method_data(acpi_handle handle, struct acpi_buffer *ret_buffer);
+acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
/*
* rscalc
@@ -198,8 +201,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
acpi_size * buffer_size_needed);
acpi_status
-acpi_rs_convert_aml_to_resources(u8 * aml_buffer,
- u32 aml_buffer_length, u8 * output_buffer);
+acpi_rs_convert_aml_to_resources(u8 * aml,
+ u32 length,
+ u32 offset, u8 resource_index, void **context);
acpi_status
acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
diff --git a/include/acpi/acstruct.h b/include/acpi/acstruct.h
index d8c1c2cdac0..5e8095f0f78 100644
--- a/include/acpi/acstruct.h
+++ b/include/acpi/acstruct.h
@@ -44,6 +44,8 @@
#ifndef __ACSTRUCT_H__
#define __ACSTRUCT_H__
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
/*****************************************************************************
*
* Tree walking typedefs and structs
@@ -51,67 +53,76 @@
****************************************************************************/
/*
- * Walk state - current state of a parse tree walk. Used for both a leisurely stroll through
- * the tree (for whatever reason), and for control method execution.
+ * Walk state - current state of a parse tree walk. Used for both a leisurely
+ * stroll through the tree (for whatever reason), and for control method
+ * execution.
*/
#define ACPI_NEXT_OP_DOWNWARD 1
#define ACPI_NEXT_OP_UPWARD 2
+/*
+ * Groups of definitions for walk_type used for different implementations of
+ * walkers (never simultaneously) - flags for interpreter:
+ */
#define ACPI_WALK_NON_METHOD 0
-#define ACPI_WALK_METHOD 1
-#define ACPI_WALK_METHOD_RESTART 2
-#define ACPI_WALK_CONST_REQUIRED 3
-#define ACPI_WALK_CONST_OPTIONAL 4
+#define ACPI_WALK_METHOD 0x01
+#define ACPI_WALK_METHOD_RESTART 0x02
+
+/* Flags for i_aSL compiler only */
+
+#define ACPI_WALK_CONST_REQUIRED 0x10
+#define ACPI_WALK_CONST_OPTIONAL 0x20
struct acpi_walk_state {
- u8 data_type; /* To differentiate various internal objs MUST BE FIRST! */
+ struct acpi_walk_state *next; /* Next walk_state in list */
+ u8 descriptor_type; /* To differentiate various internal objs */
u8 walk_type;
- acpi_owner_id owner_id; /* Owner of objects created during the walk */
- u8 last_predicate; /* Result of last predicate */
- u8 current_result; /* */
+ u16 opcode; /* Current AML opcode */
u8 next_op_info; /* Info about next_op */
u8 num_operands; /* Stack pointer for Operands[] array */
+ acpi_owner_id owner_id; /* Owner of objects created during the walk */
+ u8 last_predicate; /* Result of last predicate */
+ u8 current_result;
u8 return_used;
- u16 opcode; /* Current AML opcode */
u8 scope_depth;
u8 pass_number; /* Parse pass during table load */
- u32 arg_count; /* push for fixed or var args */
u32 aml_offset;
u32 arg_types;
u32 method_breakpoint; /* For single stepping */
u32 user_breakpoint; /* User AML breakpoint */
u32 parse_flags;
+
+ struct acpi_parse_state parser_state; /* Current state of parser */
u32 prev_arg_types;
+ u32 arg_count; /* push for fixed or var args */
- u8 *aml_last_while;
struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */
+ struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
+ union acpi_operand_object *operands[ACPI_OBJ_NUM_OPERANDS + 1]; /* Operands passed to the interpreter (+1 for NULL terminator) */
+ union acpi_operand_object **params;
+
+ u8 *aml_last_while;
union acpi_operand_object **caller_return_desc;
union acpi_generic_state *control_state; /* List of control states (nested IFs) */
struct acpi_namespace_node *deferred_node; /* Used when executing deferred opcodes */
struct acpi_gpe_event_info *gpe_event_info; /* Info for GPE (_Lxx/_Exx methods only */
union acpi_operand_object *implicit_return_obj;
- struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
struct acpi_namespace_node *method_call_node; /* Called method Node */
union acpi_parse_object *method_call_op; /* method_call Op if running a method */
union acpi_operand_object *method_desc; /* Method descriptor if running a method */
struct acpi_namespace_node *method_node; /* Method node if running a method. */
union acpi_parse_object *op; /* Current parser op */
- union acpi_operand_object *operands[ACPI_OBJ_NUM_OPERANDS + 1]; /* Operands passed to the interpreter (+1 for NULL terminator) */
const struct acpi_opcode_info *op_info; /* Info on current opcode */
union acpi_parse_object *origin; /* Start of walk [Obsolete] */
- union acpi_operand_object **params;
- struct acpi_parse_state parser_state; /* Current state of parser */
union acpi_operand_object *result_obj;
union acpi_generic_state *results; /* Stack of accumulated results */
union acpi_operand_object *return_desc; /* Return object, if any */
union acpi_generic_state *scope_info; /* Stack of nested scopes */
-
union acpi_parse_object *prev_op; /* Last op that was processed */
union acpi_parse_object *next_op; /* next op to be processed */
+ struct acpi_thread_state *thread;
acpi_parse_downwards descending_callback;
acpi_parse_upwards ascending_callback;
- struct acpi_thread_state *thread;
- struct acpi_walk_state *next; /* Next walk_state in list */
};
/* Info used by acpi_ps_init_objects */
@@ -131,32 +142,6 @@ struct acpi_init_walk_info {
struct acpi_table_desc *table_desc;
};
-/* Info used by acpi_ns_initialize_devices */
-
-struct acpi_device_walk_info {
- u16 device_count;
- u16 num_STA;
- u16 num_INI;
- struct acpi_table_desc *table_desc;
-};
-
-/* TBD: [Restructure] Merge with struct above */
-
-struct acpi_walk_info {
- u32 debug_level;
- u32 count;
- acpi_owner_id owner_id;
- u8 display_type;
-};
-
-/* Display Types */
-
-#define ACPI_DISPLAY_SUMMARY (u8) 0
-#define ACPI_DISPLAY_OBJECTS (u8) 1
-#define ACPI_DISPLAY_MASK (u8) 1
-
-#define ACPI_DISPLAY_SHORT (u8) 2
-
struct acpi_get_devices_info {
acpi_walk_callback user_function;
void *context;
@@ -189,16 +174,21 @@ union acpi_aml_operands {
} mid;
};
-/* Internal method parameter list */
-
-struct acpi_parameter_info {
- struct acpi_namespace_node *node;
+/*
+ * Structure used to pass object evaluation parameters.
+ * Purpose is to reduce CPU stack use.
+ */
+struct acpi_evaluate_info {
+ struct acpi_namespace_node *prefix_node;
+ char *pathname;
union acpi_operand_object *obj_desc;
union acpi_operand_object **parameters;
+ struct acpi_namespace_node *resolved_node;
union acpi_operand_object *return_object;
u8 pass_number;
u8 parameter_type;
u8 return_object_type;
+ u8 flags;
};
/* Types for parameter_type above */
@@ -206,4 +196,35 @@ struct acpi_parameter_info {
#define ACPI_PARAM_ARGS 0
#define ACPI_PARAM_GPE 1
+/* Values for Flags above */
+
+#define ACPI_IGNORE_RETURN_VALUE 1
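Since the whole point of struct acpi_evaluate_info is to keep evaluation state off the CPU stack, callers are expected to heap-allocate it. A sketch under that assumption (the allocator, the wrapper and its arguments are illustrative; ACPICA core uses its own allocation macros):

static acpi_status evaluate_method(struct acpi_namespace_node *node,
				   char *pathname,
				   union acpi_operand_object **args)
{
	struct acpi_evaluate_info *info;
	acpi_status status;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return AE_NO_MEMORY;

	info->prefix_node = node;	/* where the lookup starts */
	info->pathname = pathname;	/* path relative to prefix_node */
	info->parameters = args;	/* NULL-terminated argument array, or NULL */
	info->flags = ACPI_IGNORE_RETURN_VALUE;

	status = acpi_ps_execute_method(info);

	kfree(info);
	return status;
}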
+
+/* Info used by acpi_ns_initialize_devices */
+
+struct acpi_device_walk_info {
+ u16 device_count;
+ u16 num_STA;
+ u16 num_INI;
+ struct acpi_table_desc *table_desc;
+ struct acpi_evaluate_info *evaluate_info;
+};
+
+/* TBD: [Restructure] Merge with struct above */
+
+struct acpi_walk_info {
+ u32 debug_level;
+ u32 count;
+ acpi_owner_id owner_id;
+ u8 display_type;
+};
+
+/* Display Types */
+
+#define ACPI_DISPLAY_SUMMARY (u8) 0
+#define ACPI_DISPLAY_OBJECTS (u8) 1
+#define ACPI_DISPLAY_MASK (u8) 1
+
+#define ACPI_DISPLAY_SHORT (u8) 2
+
#endif
diff --git a/include/acpi/actables.h b/include/acpi/actables.h
index 30a47542e1c..4dbaf02fe52 100644
--- a/include/acpi/actables.h
+++ b/include/acpi/actables.h
@@ -136,7 +136,11 @@ acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc);
acpi_status
acpi_tb_verify_table_checksum(struct acpi_table_header *table_header);
-u8 acpi_tb_generate_checksum(void *buffer, u32 length);
+u8 acpi_tb_sum_table(void *buffer, u32 length);
+
+u8 acpi_tb_generate_checksum(struct acpi_table_header *table);
+
+void acpi_tb_set_checksum(struct acpi_table_header *table);
acpi_status
acpi_tb_validate_table_header(struct acpi_table_header *table_header);
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index ed53f842dad..b125ceed9cb 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Name: actbl.h - Table data structures defined in ACPI specification
+ * Name: actbl.h - Basic ACPI Table Definitions
*
*****************************************************************************/
@@ -45,66 +45,45 @@
#define __ACTBL_H__
/*
- * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
- * This is the only type that is even remotely portable. Anything else is not
- * portable, so do not use any other bitfield types.
- */
-
-/*
- * Values for description table header signatures
+ * Values for description table header signatures. Useful because they make
+ * it more difficult to inadvertently type in the wrong signature.
*/
-#define RSDP_NAME "RSDP"
-#define RSDP_SIG "RSD PTR " /* RSDT Pointer signature */
-#define APIC_SIG "APIC" /* Multiple APIC Description Table */
#define DSDT_SIG "DSDT" /* Differentiated System Description Table */
#define FADT_SIG "FACP" /* Fixed ACPI Description Table */
#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */
#define PSDT_SIG "PSDT" /* Persistent System Description Table */
+#define RSDP_SIG "RSD PTR " /* Root System Description Pointer */
#define RSDT_SIG "RSDT" /* Root System Description Table */
#define XSDT_SIG "XSDT" /* Extended System Description Table */
#define SSDT_SIG "SSDT" /* Secondary System Description Table */
-#define SBST_SIG "SBST" /* Smart Battery Specification Table */
-#define SPIC_SIG "SPIC" /* IOSAPIC table */
-#define BOOT_SIG "BOOT" /* Boot table */
-
-#define GL_OWNED 0x02 /* Ownership of global lock is bit 1 */
+#define RSDP_NAME "RSDP"
/*
- * Common table types. The base code can remain
- * constant if the underlying tables are changed
+ * All tables and structures must be byte-packed to match the ACPI
+ * specification, since the tables are provided by the system BIOS
*/
-#define RSDT_DESCRIPTOR struct rsdt_descriptor_rev2
-#define XSDT_DESCRIPTOR struct xsdt_descriptor_rev2
-#define FACS_DESCRIPTOR struct facs_descriptor_rev2
-#define FADT_DESCRIPTOR struct fadt_descriptor_rev2
-
#pragma pack(1)
/*
- * ACPI Version-independent tables
+ * These are the ACPI tables that are directly consumed by the subsystem.
+ *
+ * The RSDP and FACS do not use the common ACPI table header. All other ACPI
+ * tables use the header.
*
- * NOTE: The tables that are specific to ACPI versions (1.0, 2.0, etc.)
- * are in separate files.
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
*/
-struct rsdp_descriptor { /* Root System Descriptor Pointer */
- char signature[8]; /* ACPI signature, contains "RSD PTR " */
- u8 checksum; /* ACPI 1.0 checksum */
- char oem_id[6]; /* OEM identification */
- u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
- u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
- u32 length; /* XSDT Length in bytes, including header */
- u64 xsdt_physical_address; /* 64-bit physical address of the XSDT */
- u8 extended_checksum; /* Checksum of entire table (ACPI 2.0) */
- char reserved[3]; /* Reserved, must be zero */
-};
-struct acpi_common_facs { /* Common FACS for internal use */
- u32 *global_lock;
- u64 *firmware_waking_vector;
- u8 vector_width;
-};
+/*******************************************************************************
+ *
+ * ACPI Table Header. This common header is used by all tables except the
+ * RSDP and FACS. The define is used for direct inclusion of header into
+ * other ACPI tables
+ *
+ ******************************************************************************/
-#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \
+#define ACPI_TABLE_HEADER_DEF \
char signature[4]; /* ASCII table signature */\
u32 length; /* Length of table in bytes, including this header */\
u8 revision; /* ACPI Specification minor version # */\
@@ -112,154 +91,239 @@ struct acpi_common_facs { /* Common FACS for internal use */
char oem_id[6]; /* ASCII OEM identification */\
char oem_table_id[8]; /* ASCII OEM table identification */\
u32 oem_revision; /* OEM revision number */\
- char asl_compiler_id [4]; /* ASCII ASL compiler vendor ID */\
+ char asl_compiler_id[4]; /* ASCII ASL compiler vendor ID */\
u32 asl_compiler_revision; /* ASL compiler version */
-struct acpi_table_header { /* ACPI common table header */
+struct acpi_table_header {
ACPI_TABLE_HEADER_DEF};
/*
- * MADT values and structures
+ * GAS - Generic Address Structure (ACPI 2.0+)
*/
+struct acpi_generic_address {
+ u8 address_space_id; /* Address space where struct or register exists */
+ u8 register_bit_width; /* Size in bits of given register */
+ u8 register_bit_offset; /* Bit offset within the register */
+ u8 access_width; /* Minimum Access size (ACPI 3.0) */
+ u64 address; /* 64-bit address of struct or register */
+};
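For illustration only, a 16-bit register at I/O port 0x1000 would be described like this (values are made up; address_space_id 1 is system I/O per the ACPI spec):

static struct acpi_generic_address example_reg = {
	.address_space_id	= 1,	/* system I/O space */
	.register_bit_width	= 16,
	.register_bit_offset	= 0,
	.access_width		= 2,	/* word access (ACPI 3.0) */
	.address		= 0x1000,
};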
-/* Values for MADT PCATCompat */
+/*******************************************************************************
+ *
+ * RSDP - Root System Description Pointer (Signature is "RSD PTR ")
+ *
+ ******************************************************************************/
+
+struct rsdp_descriptor {
+ char signature[8]; /* ACPI signature, contains "RSD PTR " */
+ u8 checksum; /* ACPI 1.0 checksum */
+ char oem_id[6]; /* OEM identification */
+ u8 revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */
+ u32 rsdt_physical_address; /* 32-bit physical address of the RSDT */
+ u32 length; /* Table length in bytes, including header (ACPI 2.0+) */
+ u64 xsdt_physical_address; /* 64-bit physical address of the XSDT (ACPI 2.0+) */
+ u8 extended_checksum; /* Checksum of entire table (ACPI 2.0+) */
+ u8 reserved[3]; /* Reserved, must be zero */
+};
-#define DUAL_PIC 0
-#define MULTIPLE_APIC 1
+#define ACPI_RSDP_REV0_SIZE 20 /* Size of original ACPI 1.0 RSDP */
-/* Master MADT */
+/*******************************************************************************
+ *
+ * RSDT/XSDT - Root System Description Tables
+ *
+ ******************************************************************************/
-struct multiple_apic_table {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 local_apic_address; /* Physical address of local APIC */
+struct rsdt_descriptor {
+ ACPI_TABLE_HEADER_DEF u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
+};
+
+struct xsdt_descriptor {
+ ACPI_TABLE_HEADER_DEF u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
+};
+
+/*******************************************************************************
+ *
+ * FACS - Firmware ACPI Control Structure (FACS)
+ *
+ ******************************************************************************/
+
+struct facs_descriptor {
+ char signature[4]; /* ASCII table signature */
+ u32 length; /* Length of structure, in bytes */
+ u32 hardware_signature; /* Hardware configuration signature */
+ u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector */
+ u32 global_lock; /* Global Lock for shared hardware resources */
/* Flags (32 bits) */
- u8 PCATcompat:1; /* 00: System also has dual 8259s */
+ u8 S4bios_f:1; /* 00: S4BIOS support is present */
u8:7; /* 01-07: Reserved, must be zero */
u8 reserved1[3]; /* 08-31: Reserved, must be zero */
-};
-/* Values for Type in APIC_HEADER_DEF */
+ u64 xfirmware_waking_vector; /* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */
+ u8 version; /* Version of this table (ACPI 2.0+) */
+ u8 reserved[31]; /* Reserved, must be zero */
+};
-#define APIC_PROCESSOR 0
-#define APIC_IO 1
-#define APIC_XRUPT_OVERRIDE 2
-#define APIC_NMI 3
-#define APIC_LOCAL_NMI 4
-#define APIC_ADDRESS_OVERRIDE 5
-#define APIC_IO_SAPIC 6
-#define APIC_LOCAL_SAPIC 7
-#define APIC_XRUPT_SOURCE 8
-#define APIC_RESERVED 9 /* 9 and greater are reserved */
+#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */
+#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */
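These two bits implement the global-lock handshake from the ACPI specification. The kernel's real acquire path is architecture-specific assembly, but the logic reduces to a compare-and-swap loop roughly like this sketch:

static int try_acquire_global_lock(u32 *lock)
{
	u32 old, new;

	do {
		old = *lock;
		/* Always claim ownership; if someone already owns the
		 * lock, record that we are now pending on it. */
		new = (old & ~ACPI_GLOCK_PENDING) | ACPI_GLOCK_OWNED;
		if (old & ACPI_GLOCK_OWNED)
			new |= ACPI_GLOCK_PENDING;
	} while (cmpxchg(lock, old, new) != old);

	/* Acquired only if the pending bit did not have to be set */
	return !(new & ACPI_GLOCK_PENDING);
}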
/*
- * MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE)
+ * Common FACS - A version-independent FACS structure, for internal use only
*/
-#define APIC_HEADER_DEF /* Common APIC sub-structure header */\
- u8 type; \
- u8 length;
-
-struct apic_header {
-APIC_HEADER_DEF};
-
-/* Values for MPS INTI flags */
-
-#define POLARITY_CONFORMS 0
-#define POLARITY_ACTIVE_HIGH 1
-#define POLARITY_RESERVED 2
-#define POLARITY_ACTIVE_LOW 3
-
-#define TRIGGER_CONFORMS 0
-#define TRIGGER_EDGE 1
-#define TRIGGER_RESERVED 2
-#define TRIGGER_LEVEL 3
-
-/* Common flag definitions (16 bits each) */
-
-#define MPS_INTI_FLAGS \
- u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
- u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
- u8 : 4; /* 04-07: Reserved, must be zero */\
- u8 reserved1; /* 08-15: Reserved, must be zero */
-
-#define LOCAL_APIC_FLAGS \
- u8 processor_enabled: 1; /* 00: Processor is usable if set */\
- u8 : 7; /* 01-07: Reserved, must be zero */\
- u8 reserved2; /* 08-15: Reserved, must be zero */
-
-/* Sub-structures for MADT */
-
-struct madt_processor_apic {
- APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
- u8 local_apic_id; /* Processor's local APIC id */
- LOCAL_APIC_FLAGS};
-
-struct madt_io_apic {
- APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */
- u8 reserved; /* Reserved - must be zero */
- u32 address; /* APIC physical address */
- u32 interrupt; /* Global system interrupt where INTI
- * lines start */
+struct acpi_common_facs {
+ u32 *global_lock;
+ u64 *firmware_waking_vector;
+ u8 vector_width;
};
-struct madt_interrupt_override {
- APIC_HEADER_DEF u8 bus; /* 0 - ISA */
- u8 source; /* Interrupt source (IRQ) */
- u32 interrupt; /* Global system interrupt */
- MPS_INTI_FLAGS};
+/*******************************************************************************
+ *
+ * FADT - Fixed ACPI Description Table (Signature "FACP")
+ *
+ ******************************************************************************/
+
+/* Fields common to all versions of the FADT */
+
+#define ACPI_FADT_COMMON \
+ ACPI_TABLE_HEADER_DEF \
+ u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \
+ u32 V1_dsdt; /* 32-bit physical address of DSDT */ \
+ u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \
+ u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \
+ u16 sci_int; /* System vector of SCI interrupt */ \
+ u32 smi_cmd; /* Port address of SMI command port */ \
+ u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \
+ u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \
+ u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \
+ u8 pstate_cnt; /* Processor performance state control*/ \
+ u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a Event Reg Blk */ \
+ u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b Event Reg Blk */ \
+ u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \
+ u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \
+ u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \
+ u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \
+ u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \
+ u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \
+ u8 pm1_evt_len; /* Byte Length of ports at pm1_x_evt_blk */ \
+ u8 pm1_cnt_len; /* Byte Length of ports at pm1_x_cnt_blk */ \
+ u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \
+ u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \
+ u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \
+ u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \
+ u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \
+ u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \
+ u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \
+ u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \
+ u16 flush_size; /* Processor's memory cache line width, in bytes */ \
+ u16 flush_stride; /* Number of flush strides that need to be read */ \
+ u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \
+ u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \
+ u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \
+ u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \
+ u8 century; /* Index to century in RTC CMOS RAM */ \
+ u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/ \
+ u8 reserved2; /* Reserved, must be zero */
-struct madt_nmi_source {
- APIC_HEADER_DEF MPS_INTI_FLAGS u32 interrupt; /* Global system interrupt */
+/*
+ * ACPI 2.0+ FADT
+ */
+struct fadt_descriptor {
+ ACPI_FADT_COMMON
+ /* Flags (32 bits) */
+ u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
+ u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
+ u8 proc_c1:1; /* 02: All processors support C1 state */
+ u8 plvl2_up:1; /* 03: C2 state works on MP system */
+ u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
+ u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
+ u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
+ u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
+ u8 tmr_val_ext:1; /* 08: tmr_val is 32 bits 0=24-bits */
+ u8 dock_cap:1; /* 09: Docking supported */
+ u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
+ u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
+ u8 headless:1; /* 12: No local video capabilities or local input devices */
+ u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
+
+ u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
+ u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
+ u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
+ u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
+ u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
+ u8 force_apic_physical_destination_mode:1; /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
+ u8:4; /* 20-23: Reserved, must be zero */
+ u8 reserved3; /* 24-31: Reserved, must be zero */
+
+ struct acpi_generic_address reset_register; /* Reset register address in GAS format */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system */
+ u8 reserved4[3]; /* These three bytes must be zero */
+ u64 xfirmware_ctrl; /* 64-bit physical address of FACS */
+ u64 Xdsdt; /* 64-bit physical address of DSDT */
+ struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */
+ struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */
+ struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */
+ struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */
+ struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */
+ struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */
+ struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */
+ struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */
};
-struct madt_local_apic_nmi {
- APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
- MPS_INTI_FLAGS u8 lint; /* LINTn to which NMI is connected */
+/*
+ * "Down-revved" ACPI 2.0 FADT descriptor
+ * Defined here to allow compiler to generate the length of the struct
+ */
+struct fadt_descriptor_rev2_minus {
+ ACPI_FADT_COMMON u32 flags;
+ struct acpi_generic_address reset_register; /* Reset register address in GAS format */
+ u8 reset_value; /* Value to write to the reset_register port to reset the system. */
+ u8 reserved7[3]; /* Reserved, must be zero */
};
-struct madt_address_override {
- APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */
- u64 address; /* APIC physical address */
+/*
+ * ACPI 1.0 FADT
+ * Defined here to allow compiler to generate the length of the struct
+ */
+struct fadt_descriptor_rev1 {
+ ACPI_FADT_COMMON u32 flags;
};
-struct madt_io_sapic {
- APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */
- u8 reserved; /* Reserved, must be zero */
- u32 interrupt_base; /* Glocal interrupt for SAPIC start */
- u64 address; /* SAPIC physical address */
-};
+/* FADT: Preferred Power Management Profiles */
-struct madt_local_sapic {
- APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
- u8 local_sapic_id; /* SAPIC ID */
- u8 local_sapic_eid; /* SAPIC EID */
- u8 reserved[3]; /* Reserved, must be zero */
- LOCAL_APIC_FLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */
- char processor_uIDstring[1]; /* String UID - ACPI 3.0 */
-};
+#define PM_UNSPECIFIED 0
+#define PM_DESKTOP 1
+#define PM_MOBILE 2
+#define PM_WORKSTATION 3
+#define PM_ENTERPRISE_SERVER 4
+#define PM_SOHO_SERVER 5
+#define PM_APPLIANCE_PC 6
-struct madt_interrupt_source {
- APIC_HEADER_DEF MPS_INTI_FLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */
- u8 processor_id; /* Processor ID */
- u8 processor_eid; /* Processor EID */
- u8 io_sapic_vector; /* Vector value for PMI interrupts */
- u32 interrupt; /* Global system interrupt */
- u32 flags; /* Interrupt Source Flags */
-};
+/* FADT: Boot Arch Flags */
-/*
- * Smart Battery
- */
-struct smart_battery_table {
- ACPI_TABLE_HEADER_DEF u32 warning_level;
- u32 low_level;
- u32 critical_level;
-};
+#define BAF_LEGACY_DEVICES 0x0001
+#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
+
+#define FADT2_REVISION_ID 3
+#define FADT2_MINUS_REVISION_ID 2
+
+/* Reset to default packing */
#pragma pack()
/*
+ * This macro is temporary until the table bitfield flag definitions
+ * are removed and replaced by a Flags field.
+ */
+#define ACPI_FLAG_OFFSET(d,f,o) (u8) (ACPI_OFFSET (d,f) + \
+ sizeof(((d *)0)->f) + o)
+/*
+ * Get the remaining ACPI tables
+ */
+#include "actbl1.h"
+
+/*
* ACPI Table information. We save the table address, length,
* and type of memory allocation (mapped or allocated) for each
* table for 1) when we exit, and 2) if a new table is installed
@@ -290,27 +354,17 @@ struct acpi_table_support {
u8 flags;
};
-/*
- * Get the ACPI version-specific tables
- */
-#include "actbl1.h" /* Acpi 1.0 table definitions */
-#include "actbl2.h" /* Acpi 2.0 table definitions */
-
extern u8 acpi_fadt_is_v1; /* is set to 1 if FADT is revision 1,
* needed for certain workarounds */
+/* Macros used to generate offsets to specific table fields */
-#pragma pack(1)
-/*
- * High performance timer
- */
-struct hpet_table {
- ACPI_TABLE_HEADER_DEF u32 hardware_id;
- struct acpi_generic_address base_address;
- u8 hpet_number;
- u16 clock_tick;
- u8 attributes;
-};
+#define ACPI_FACS_OFFSET(f) (u8) ACPI_OFFSET (struct facs_descriptor,f)
+#define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct fadt_descriptor, f)
+#define ACPI_GAS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_generic_address,f)
+#define ACPI_HDR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_header,f)
+#define ACPI_RSDP_OFFSET(f) (u8) ACPI_OFFSET (struct rsdp_descriptor,f)
-#pragma pack()
+#define ACPI_FADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct fadt_descriptor,f,o)
+#define ACPI_FACS_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct facs_descriptor,f,o)
#endif /* __ACTBL_H__ */
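Assuming ACPI_OFFSET() behaves like offsetof(), these helpers simply name a byte offset within a table; for example (illustrative only):

static u8 fadt_sci_int_offset(void)
{
	/* Byte offset of sci_int within struct fadt_descriptor */
	return ACPI_FADT_OFFSET(sci_int);
}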
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index cd428d57add..745a6445a4f 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Name: actbl1.h - ACPI 1.0 tables
+ * Name: actbl1.h - Additional ACPI table definitions
*
*****************************************************************************/
@@ -44,92 +44,599 @@
#ifndef __ACTBL1_H__
#define __ACTBL1_H__
+/*******************************************************************************
+ *
+ * Additional ACPI Tables
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures. Useful because they make
+ * it more difficult to inadvertently type in the wrong signature.
+ */
+#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */
+#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
+#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */
+#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
+#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
+#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
+#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */
+#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */
+#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
+#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
+
+/* Legacy names */
+
+#define APIC_SIG "APIC" /* Multiple APIC Description Table */
+#define BOOT_SIG "BOOT" /* Simple Boot Flag Table */
+#define SBST_SIG "SBST" /* Smart Battery Specification Table */
+
+/*
+ * All tables must be byte-packed to match the ACPI specification, since
+ * the tables are provided by the system BIOS.
+ */
#pragma pack(1)
/*
- * ACPI 1.0 Root System Description Table (RSDT)
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
*/
-struct rsdt_descriptor_rev1 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
-};
+
+/*******************************************************************************
+ *
+ * ASF - Alert Standard Format table (Signature "ASF!")
+ *
+ ******************************************************************************/
+
+struct acpi_table_asf {
+ACPI_TABLE_HEADER_DEF};
+
+#define ACPI_ASF_HEADER_DEF \
+ u8 type; \
+ u8 reserved; \
+ u16 length;
+
+struct acpi_asf_header {
+ACPI_ASF_HEADER_DEF};
+
+/* Values for Type field */
+
+#define ASF_INFO 0
+#define ASF_ALERT 1
+#define ASF_CONTROL 2
+#define ASF_BOOT 3
+#define ASF_ADDRESS 4
+#define ASF_RESERVED 5
/*
- * ACPI 1.0 Firmware ACPI Control Structure (FACS)
+ * ASF subtables
*/
-struct facs_descriptor_rev1 {
- char signature[4]; /* ASCII table signature */
- u32 length; /* Length of structure in bytes */
- u32 hardware_signature; /* Hardware configuration signature */
- u32 firmware_waking_vector; /* ACPI OS waking vector */
- u32 global_lock; /* Global Lock */
+
+/* 0: ASF Information */
+
+struct acpi_asf_info {
+ ACPI_ASF_HEADER_DEF u8 min_reset_value;
+ u8 min_poll_interval;
+ u16 system_id;
+ u32 mfg_id;
+ u8 flags;
+ u8 reserved2[3];
+};
+
+/* 1: ASF Alerts */
+
+struct acpi_asf_alert {
+ ACPI_ASF_HEADER_DEF u8 assert_mask;
+ u8 deassert_mask;
+ u8 alerts;
+ u8 data_length;
+ u8 array[1];
+};
+
+/* 2: ASF Remote Control */
+
+struct acpi_asf_remote {
+ ACPI_ASF_HEADER_DEF u8 controls;
+ u8 data_length;
+ u16 reserved2;
+ u8 array[1];
+};
+
+/* 3: ASF RMCP Boot Options */
+
+struct acpi_asf_rmcp {
+ ACPI_ASF_HEADER_DEF u8 capabilities[7];
+ u8 completion_code;
+ u32 enterprise_id;
+ u8 command;
+ u16 parameter;
+ u16 boot_options;
+ u16 oem_parameters;
+};
+
+/* 4: ASF Address */
+
+struct acpi_asf_address {
+ ACPI_ASF_HEADER_DEF u8 eprom_address;
+ u8 devices;
+ u8 smbus_addresses[1];
+};
+
+/*******************************************************************************
+ *
+ * BOOT - Simple Boot Flag Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_boot {
+ ACPI_TABLE_HEADER_DEF u8 cmos_index; /* Index in CMOS RAM for the boot register */
+ u8 reserved[3];
+};
+
+/*******************************************************************************
+ *
+ * CPEP - Corrected Platform Error Polling table
+ *
+ ******************************************************************************/
+
+struct acpi_table_cpep {
+ ACPI_TABLE_HEADER_DEF u64 reserved;
+};
+
+/* Subtable */
+
+struct acpi_cpep_polling {
+ u8 type;
+ u8 length;
+ u8 processor_id; /* Processor ID */
+ u8 processor_eid; /* Processor EID */
+ u32 polling_interval; /* Polling interval (msec) */
+};
+
+/*******************************************************************************
+ *
+ * DBGP - Debug Port table
+ *
+ ******************************************************************************/
+
+struct acpi_table_dbgp {
+ ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address debug_port;
+};
+
+/*******************************************************************************
+ *
+ * ECDT - Embedded Controller Boot Resources Table
+ *
+ ******************************************************************************/
+
+struct ec_boot_resources {
+ ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */
+ struct acpi_generic_address ec_data; /* Address of EC data register */
+ u32 uid; /* Unique ID - must be same as the EC _UID method */
+ u8 gpe_bit; /* The GPE for the EC */
+ u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */
+};
+
+/*******************************************************************************
+ *
+ * HPET - High Precision Event Timer table
+ *
+ ******************************************************************************/
+
+struct acpi_hpet_table {
+ ACPI_TABLE_HEADER_DEF u32 hardware_id; /* Hardware ID of event timer block */
+ struct acpi_generic_address base_address; /* Address of event timer block */
+ u8 hpet_number; /* HPET sequence number */
+ u16 clock_tick; /* Main counter min tick, periodic mode */
+ u8 attributes;
+};
+
+#if 0 /* HPET flags to be converted to macros */
+struct { /* Flags (8 bits) */
+ u8 page_protect:1; /* 00: No page protection */
+ u8 page_protect4:1; /* 01: 4_kB page protected */
+ u8 page_protect64:1; /* 02: 64_kB page protected */
+ u8:5; /* 03-07: Reserved, must be zero */
+} flags;
+#endif
+
+/*******************************************************************************
+ *
+ * MADT - Multiple APIC Description Table
+ *
+ ******************************************************************************/
+
+struct multiple_apic_table {
+ ACPI_TABLE_HEADER_DEF u32 local_apic_address; /* Physical address of local APIC */
/* Flags (32 bits) */
- u8 S4bios_f:1; /* 00: S4BIOS support is present */
+ u8 PCATcompat:1; /* 00: System also has dual 8259s */
u8:7; /* 01-07: Reserved, must be zero */
u8 reserved1[3]; /* 08-31: Reserved, must be zero */
-
- u8 reserved2[40]; /* Reserved, must be zero */
};
+/* Values for MADT PCATCompat */
+
+#define DUAL_PIC 0
+#define MULTIPLE_APIC 1
+
+/* Common MADT Sub-table header */
+
+#define APIC_HEADER_DEF \
+ u8 type; \
+ u8 length;
+
+struct apic_header {
+APIC_HEADER_DEF};
+
+/* Values for Type in struct apic_header */
+
+#define APIC_PROCESSOR 0
+#define APIC_IO 1
+#define APIC_XRUPT_OVERRIDE 2
+#define APIC_NMI 3
+#define APIC_LOCAL_NMI 4
+#define APIC_ADDRESS_OVERRIDE 5
+#define APIC_IO_SAPIC 6
+#define APIC_LOCAL_SAPIC 7
+#define APIC_XRUPT_SOURCE 8
+#define APIC_RESERVED 9 /* 9 and greater are reserved */
+
+/* Flag definitions for MADT sub-tables */
+
+#define ACPI_MADT_IFLAGS /* INTI flags (16 bits) */ \
+ u8 polarity : 2; /* 00-01: Polarity of APIC I/O input signals */\
+ u8 trigger_mode : 2; /* 02-03: Trigger mode of APIC input signals */\
+ u8 : 4; /* 04-07: Reserved, must be zero */\
+ u8 reserved1; /* 08-15: Reserved, must be zero */
+
+#define ACPI_MADT_LFLAGS /* Local Sapic flags (32 bits) */ \
+ u8 processor_enabled: 1; /* 00: Processor is usable if set */\
+ u8 : 7; /* 01-07: Reserved, must be zero */\
+ u8 reserved2[3]; /* 08-31: Reserved, must be zero */
+
+/* Values for MPS INTI flags */
+
+#define POLARITY_CONFORMS 0
+#define POLARITY_ACTIVE_HIGH 1
+#define POLARITY_RESERVED 2
+#define POLARITY_ACTIVE_LOW 3
+
+#define TRIGGER_CONFORMS 0
+#define TRIGGER_EDGE 1
+#define TRIGGER_RESERVED 2
+#define TRIGGER_LEVEL 3
+
/*
- * ACPI 1.0 Fixed ACPI Description Table (FADT)
+ * MADT Sub-tables, correspond to Type in struct apic_header
*/
-struct fadt_descriptor_rev1 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 firmware_ctrl; /* Physical address of FACS */
- u32 dsdt; /* Physical address of DSDT */
- u8 model; /* System Interrupt Model */
- u8 reserved1; /* Reserved, must be zero */
- u16 sci_int; /* System vector of SCI interrupt */
- u32 smi_cmd; /* Port address of SMI command port */
- u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */
- u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */
- u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
- u8 reserved2; /* Reserved, must be zero */
- u32 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
- u32 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
- u32 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
- u32 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
- u32 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
- u32 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
- u32 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
- u32 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
- u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
- u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
- u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
- u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */
- u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
- u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
- u8 gpe1_base; /* Offset in gpe model where gpe1 events start */
- u8 reserved3; /* Reserved, must be zero */
- u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */
- u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */
- u16 flush_size; /* Size of area read to flush caches */
- u16 flush_stride; /* Stride used in flushing caches */
- u8 duty_offset; /* Bit location of duty cycle field in p_cnt reg */
- u8 duty_width; /* Bit width of duty cycle field in p_cnt reg */
- u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */
- u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */
- u8 century; /* Index to century in RTC CMOS RAM */
- u8 reserved4[3]; /* Reserved, must be zero */
+
+/* 0: processor APIC */
+
+struct madt_processor_apic {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ u8 local_apic_id; /* Processor's local APIC id */
+ ACPI_MADT_LFLAGS};
+
+/* 1: IO APIC */
+
+struct madt_io_apic {
+ APIC_HEADER_DEF u8 io_apic_id; /* I/O APIC ID */
+ u8 reserved; /* Reserved - must be zero */
+ u32 address; /* APIC physical address */
+ u32 interrupt; /* Global system interrupt where INTI lines start */
+};
+
+/* 2: Interrupt Override */
+
+struct madt_interrupt_override {
+ APIC_HEADER_DEF u8 bus; /* 0 - ISA */
+ u8 source; /* Interrupt source (IRQ) */
+ u32 interrupt; /* Global system interrupt */
+ ACPI_MADT_IFLAGS};
+
+/* 3: NMI Sources */
+
+struct madt_nmi_source {
+ APIC_HEADER_DEF ACPI_MADT_IFLAGS u32 interrupt; /* Global system interrupt */
+};
+
+/* 4: Local APIC NMI */
+
+struct madt_local_apic_nmi {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ ACPI_MADT_IFLAGS u8 lint; /* LINTn to which NMI is connected */
+};
+
+/* 5: Address Override */
+
+struct madt_address_override {
+ APIC_HEADER_DEF u16 reserved; /* Reserved, must be zero */
+ u64 address; /* APIC physical address */
+};
+
+/* 6: I/O Sapic */
+
+struct madt_io_sapic {
+ APIC_HEADER_DEF u8 io_sapic_id; /* I/O SAPIC ID */
+ u8 reserved; /* Reserved, must be zero */
+ u32 interrupt_base; /* Global interrupt for SAPIC start */
+ u64 address; /* SAPIC physical address */
+};
+
+/* 7: Local Sapic */
+
+struct madt_local_sapic {
+ APIC_HEADER_DEF u8 processor_id; /* ACPI processor id */
+ u8 local_sapic_id; /* SAPIC ID */
+ u8 local_sapic_eid; /* SAPIC EID */
+ u8 reserved[3]; /* Reserved, must be zero */
+ ACPI_MADT_LFLAGS u32 processor_uID; /* Numeric UID - ACPI 3.0 */
+ char processor_uIDstring[1]; /* String UID - ACPI 3.0 */
+};
+
+/* 8: Platform Interrupt Source */
+
+struct madt_interrupt_source {
+ APIC_HEADER_DEF ACPI_MADT_IFLAGS u8 interrupt_type; /* 1=PMI, 2=INIT, 3=corrected */
+ u8 processor_id; /* Processor ID */
+ u8 processor_eid; /* Processor EID */
+ u8 io_sapic_vector; /* Vector value for PMI interrupts */
+ u32 interrupt; /* Global system interrupt */
+ u32 flags; /* Interrupt Source Flags */
+};
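Every MADT sub-table starts with the common apic_header, so consumers walk the list using the type/length pair. A hedged sketch of such a walker (not part of this patch):

static void walk_madt_subtables(struct multiple_apic_table *madt)
{
	u8 *entry = (u8 *)madt + sizeof(*madt);
	u8 *end = (u8 *)madt + madt->length;

	while (entry + sizeof(struct apic_header) <= end) {
		struct apic_header *header = (struct apic_header *)entry;

		if (header->length < sizeof(*header))
			break;		/* malformed entry, stop walking */
		if (header->type == APIC_PROCESSOR) {
			/* entry is a struct madt_processor_apic; use
			 * processor_id / local_apic_id here */
		}
		entry += header->length;
	}
}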
+
+#ifdef DUPLICATE_DEFINITION_WITH_LINUX_ACPI_H
+/*******************************************************************************
+ *
+ * MCFG - PCI Memory Mapped Configuration table and sub-table
+ *
+ ******************************************************************************/
+
+struct acpi_table_mcfg {
+ ACPI_TABLE_HEADER_DEF u8 reserved[8];
+};
+
+struct acpi_mcfg_allocation {
+ u64 base_address; /* Base address, processor-relative */
+ u16 pci_segment; /* PCI segment group number */
+ u8 start_bus_number; /* Starting PCI Bus number */
+ u8 end_bus_number; /* Final PCI Bus number */
+ u32 reserved;
+};
+#endif
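
For reference, each MCFG allocation describes one memory-mapped configuration window; the configuration-space address of a bus/device/function within it follows the standard PCI Express ECAM layout. A sketch (not code from this patch, assuming the usual bus<<20 | dev<<15 | fn<<12 encoding):

static u64 mcfg_config_address(struct acpi_mcfg_allocation *a,
			       u8 bus, u8 dev, u8 fn)
{
	return a->base_address +
	       (((u64)(bus - a->start_bus_number) << 20) |
		((u64)dev << 15) |
		((u64)fn << 12));
}
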
+
+/*******************************************************************************
+ *
+ * SBST - Smart Battery Specification Table
+ *
+ ******************************************************************************/
+
+struct smart_battery_table {
+ ACPI_TABLE_HEADER_DEF u32 warning_level;
+ u32 low_level;
+ u32 critical_level;
+};
+
+/*******************************************************************************
+ *
+ * SLIT - System Locality Distance Information Table
+ *
+ ******************************************************************************/
+
+struct system_locality_info {
+ ACPI_TABLE_HEADER_DEF u64 locality_count;
+ u8 entry[1][1];
+};
+
+/*******************************************************************************
+ *
+ * SPCR - Serial Port Console Redirection table
+ *
+ ******************************************************************************/
+
+struct acpi_table_spcr {
+ ACPI_TABLE_HEADER_DEF u8 interface_type; /* 0=full 16550, 1=subset of 16550 */
+ u8 reserved[3];
+ struct acpi_generic_address serial_port;
+ u8 interrupt_type;
+ u8 pc_interrupt;
+ u32 interrupt;
+ u8 baud_rate;
+ u8 parity;
+ u8 stop_bits;
+ u8 flow_control;
+ u8 terminal_type;
+ u8 reserved2;
+ u16 pci_device_id;
+ u16 pci_vendor_id;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+ u32 pci_flags;
+ u8 pci_segment;
+ u32 reserved3;
+};
+
+/*******************************************************************************
+ *
+ * SPMI - Server Platform Management Interface table
+ *
+ ******************************************************************************/
+
+struct acpi_table_spmi {
+ ACPI_TABLE_HEADER_DEF u8 reserved;
+ u8 interface_type;
+ u16 spec_revision; /* Version of IPMI */
+ u8 interrupt_type;
+ u8 gpe_number; /* GPE assigned */
+ u8 reserved2;
+ u8 pci_device_flag;
+ u32 interrupt;
+ struct acpi_generic_address ipmi_register;
+ u8 pci_segment;
+ u8 pci_bus;
+ u8 pci_device;
+ u8 pci_function;
+};
+
+/*******************************************************************************
+ *
+ * SRAT - System Resource Affinity Table
+ *
+ ******************************************************************************/
+
+struct system_resource_affinity {
+ ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */
+ u64 reserved2; /* Reserved, must be zero */
+};
+
+/* SRAT common sub-table header */
+
+#define SRAT_SUBTABLE_HEADER \
+ u8 type; \
+ u8 length;
+
+/* Values for Type above */
+
+#define SRAT_CPU_AFFINITY 0
+#define SRAT_MEMORY_AFFINITY 1
+#define SRAT_RESERVED 2
+
+/* SRAT sub-tables */
+
+struct static_resource_alloc {
+ SRAT_SUBTABLE_HEADER u8 proximity_domain_lo;
+ u8 apic_id;
+
+ /* Flags (32 bits) */
+
+ u8 enabled:1; /* 00: Use affinity structure */
+ u8:7; /* 01-07: Reserved, must be zero */
+ u8 reserved3[3]; /* 08-31: Reserved, must be zero */
+
+ u8 local_sapic_eid;
+ u8 proximity_domain_hi[3];
+ u32 reserved4; /* Reserved, must be zero */
+};
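
The CPU affinity entry splits its proximity domain across proximity_domain_lo and proximity_domain_hi[3]; on SRAT revisions that define the high bytes, the full 32-bit domain is reassembled little-endian. A sketch under that assumption:

static u32 srat_cpu_proximity_domain(struct static_resource_alloc *pa)
{
	return (u32)pa->proximity_domain_lo |
	       ((u32)pa->proximity_domain_hi[0] << 8) |
	       ((u32)pa->proximity_domain_hi[1] << 16) |
	       ((u32)pa->proximity_domain_hi[2] << 24);
}
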
+
+struct memory_affinity {
+ SRAT_SUBTABLE_HEADER u32 proximity_domain;
+ u16 reserved3;
+ u64 base_address;
+ u64 address_length;
+ u32 reserved4;
/* Flags (32 bits) */
- u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
- u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
- u8 proc_c1:1; /* 02: All processors support C1 state */
- u8 plvl2_up:1; /* 03: C2 state works on MP system */
- u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
- u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
- u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
- u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
- u8 tmr_val_ext:1; /* 08: tmr_val width is 32 bits (0 = 24 bits) */
- u8:7; /* 09-15: Reserved, must be zero */
- u8 reserved5[2]; /* 16-31: Reserved, must be zero */
+ u8 enabled:1; /* 00: Use affinity structure */
+ u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
+ u8 non_volatile:1; /* 02: Memory is non-volatile */
+ u8:5; /* 03-07: Reserved, must be zero */
+ u8 reserved5[3]; /* 08-31: Reserved, must be zero */
+
+ u64 reserved6; /* Reserved, must be zero */
+};
+
+/*******************************************************************************
+ *
+ * TCPA - Trusted Computing Platform Alliance table
+ *
+ ******************************************************************************/
+
+struct acpi_table_tcpa {
+ ACPI_TABLE_HEADER_DEF u16 reserved;
+ u32 max_log_length; /* Maximum length for the event log area */
+ u64 log_address; /* Address of the event log area */
};
+/*******************************************************************************
+ *
+ * WDRT - Watchdog Resource Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_wdrt {
+ ACPI_TABLE_HEADER_DEF u32 header_length; /* Watchdog Header Length */
+ u8 pci_segment; /* PCI Segment number */
+ u8 pci_bus; /* PCI Bus number */
+ u8 pci_device; /* PCI Device number */
+ u8 pci_function; /* PCI Function number */
+ u32 timer_period; /* Period of one timer count (msec) */
+ u32 max_count; /* Maximum counter value supported */
+ u32 min_count; /* Minimum counter value */
+ u8 flags;
+ u8 reserved[3];
+ u32 entries; /* Number of watchdog entries that follow */
+};
+
+#if 0 /* Flags, will be converted to macros */
+u8 enabled:1; /* 00: Timer enabled */
+u8:6; /* 01-06: Reserved */
+u8 sleep_stop:1; /* 07: Timer stopped in sleep state */
+#endif
+
+/* Macros used to generate offsets to specific table fields */
+
+#define ACPI_ASF0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_info,f)
+#define ACPI_ASF1_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_alert,f)
+#define ACPI_ASF2_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_remote,f)
+#define ACPI_ASF3_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_rmcp,f)
+#define ACPI_ASF4_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_asf_address,f)
+#define ACPI_BOOT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_boot,f)
+#define ACPI_CPEP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_cpep,f)
+#define ACPI_CPEP0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_cpep_polling,f)
+#define ACPI_DBGP_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_dbgp,f)
+#define ACPI_ECDT_OFFSET(f) (u8) ACPI_OFFSET (struct ec_boot_resources,f)
+#define ACPI_HPET_OFFSET(f) (u8) ACPI_OFFSET (struct hpet_table,f)
+#define ACPI_MADT_OFFSET(f) (u8) ACPI_OFFSET (struct multiple_apic_table,f)
+#define ACPI_MADT0_OFFSET(f) (u8) ACPI_OFFSET (struct madt_processor_apic,f)
+#define ACPI_MADT1_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_apic,f)
+#define ACPI_MADT2_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_override,f)
+#define ACPI_MADT3_OFFSET(f) (u8) ACPI_OFFSET (struct madt_nmi_source,f)
+#define ACPI_MADT4_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_apic_nmi,f)
+#define ACPI_MADT5_OFFSET(f) (u8) ACPI_OFFSET (struct madt_address_override,f)
+#define ACPI_MADT6_OFFSET(f) (u8) ACPI_OFFSET (struct madt_io_sapic,f)
+#define ACPI_MADT7_OFFSET(f) (u8) ACPI_OFFSET (struct madt_local_sapic,f)
+#define ACPI_MADT8_OFFSET(f) (u8) ACPI_OFFSET (struct madt_interrupt_source,f)
+#define ACPI_MADTH_OFFSET(f) (u8) ACPI_OFFSET (struct apic_header,f)
+#define ACPI_MCFG_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_mcfg,f)
+#define ACPI_MCFG0_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_mcfg_allocation,f)
+#define ACPI_SBST_OFFSET(f) (u8) ACPI_OFFSET (struct smart_battery_table,f)
+#define ACPI_SLIT_OFFSET(f) (u8) ACPI_OFFSET (struct system_locality_info,f)
+#define ACPI_SPCR_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spcr,f)
+#define ACPI_SPMI_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_spmi,f)
+#define ACPI_SRAT_OFFSET(f) (u8) ACPI_OFFSET (struct system_resource_affinity,f)
+#define ACPI_SRAT0_OFFSET(f) (u8) ACPI_OFFSET (struct static_resource_alloc,f)
+#define ACPI_SRAT1_OFFSET(f) (u8) ACPI_OFFSET (struct memory_affinity,f)
+#define ACPI_TCPA_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_tcpa,f)
+#define ACPI_WDRT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_wdrt,f)
+
+#define ACPI_HPET_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct hpet_table,f,o)
+#define ACPI_SRAT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct static_resource_alloc,f,o)
+#define ACPI_SRAT1_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct memory_affinity,f,o)
+#define ACPI_MADT_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct multiple_apic_table,f,o)
+#define ACPI_MADT0_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_processor_apic,f,o)
+#define ACPI_MADT2_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_override,f,o)
+#define ACPI_MADT3_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_nmi_source,f,o)
+#define ACPI_MADT4_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_apic_nmi,f,o)
+#define ACPI_MADT7_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_local_sapic,f,o)
+#define ACPI_MADT8_FLAG_OFFSET(f,o) ACPI_FLAG_OFFSET (struct madt_interrupt_source,f,o)
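
These helpers only compute byte (or bit) positions within the structures defined above. Typical use, assuming ACPI_OFFSET() is an offsetof()-style macro and ACPI_FLAG_OFFSET() pairs a byte offset with a bit number (both defined elsewhere):

/* Offset of the I/O APIC address field within its MADT subtable */
u8 io_apic_addr_offset = ACPI_MADT1_OFFSET(address);

/* Position of the "enabled" flag in an SRAT CPU affinity entry */
u8 srat_enabled_flag = ACPI_SRAT0_FLAG_OFFSET(enabled, 0);
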
+
+/* Reset to default packing */
+
#pragma pack()
#endif /* __ACTBL1_H__ */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index dfc7ac1094b..67efe6cad27 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -44,234 +44,6 @@
#ifndef __ACTBL2_H__
#define __ACTBL2_H__
-/*
- * Prefered Power Management Profiles
- */
-#define PM_UNSPECIFIED 0
-#define PM_DESKTOP 1
-#define PM_MOBILE 2
-#define PM_WORKSTATION 3
-#define PM_ENTERPRISE_SERVER 4
-#define PM_SOHO_SERVER 5
-#define PM_APPLIANCE_PC 6
-
-/*
- * ACPI Boot Arch Flags
- */
-#define BAF_LEGACY_DEVICES 0x0001
-#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
-
-#define FADT2_REVISION_ID 3
-#define FADT2_MINUS_REVISION_ID 2
-
-#pragma pack(1)
-
-/*
- * ACPI 2.0 Root System Description Table (RSDT)
- */
-struct rsdt_descriptor_rev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u32 table_offset_entry[1]; /* Array of pointers to ACPI tables */
-};
-
-/*
- * ACPI 2.0 Extended System Description Table (XSDT)
- */
-struct xsdt_descriptor_rev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- u64 table_offset_entry[1]; /* Array of pointers to ACPI tables */
-};
-
-/*
- * ACPI 2.0 Firmware ACPI Control Structure (FACS)
- */
-struct facs_descriptor_rev2 {
- char signature[4]; /* ASCII table signature */
- u32 length; /* Length of structure, in bytes */
- u32 hardware_signature; /* Hardware configuration signature */
- u32 firmware_waking_vector; /* 32-bit physical address of the Firmware Waking Vector. */
- u32 global_lock; /* Global Lock used to synchronize access to shared hardware resources */
-
- /* Flags (32 bits) */
-
- u8 S4bios_f:1; /* 00: S4BIOS support is present */
- u8:7; /* 01-07: Reserved, must be zero */
- u8 reserved1[3]; /* 08-31: Reserved, must be zero */
-
- u64 xfirmware_waking_vector; /* 64-bit physical address of the Firmware Waking Vector. */
- u8 version; /* Version of this table */
- u8 reserved3[31]; /* Reserved, must be zero */
-};
-
-/*
- * ACPI 2.0+ Generic Address Structure (GAS)
- */
-struct acpi_generic_address {
- u8 address_space_id; /* Address space where struct or register exists. */
- u8 register_bit_width; /* Size in bits of given register */
- u8 register_bit_offset; /* Bit offset within the register */
- u8 access_width; /* Minimum Access size (ACPI 3.0) */
- u64 address; /* 64-bit address of struct or register */
-};
-
-#define FADT_REV2_COMMON \
- u32 V1_firmware_ctrl; /* 32-bit physical address of FACS */ \
- u32 V1_dsdt; /* 32-bit physical address of DSDT */ \
- u8 reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \
- u8 prefer_PM_profile; /* Conveys preferred power management profile to OSPM. */ \
- u16 sci_int; /* System vector of SCI interrupt */ \
- u32 smi_cmd; /* Port address of SMI command port */ \
- u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */ \
- u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */ \
- u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */ \
- u8 pstate_cnt; /* Processor performance state control*/ \
- u32 V1_pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */ \
- u32 V1_pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */ \
- u32 V1_pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */ \
- u32 V1_pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */ \
- u32 V1_pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */ \
- u32 V1_pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \
- u32 V1_gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */ \
- u32 V1_gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */ \
- u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */ \
- u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */ \
- u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */ \
- u8 pm_tm_len; /* Byte Length of ports at pm_tm_blk */ \
- u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */ \
- u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */ \
- u8 gpe1_base; /* Offset in gpe model where gpe1 events start */ \
- u8 cst_cnt; /* Support for the _CST object and C States change notification.*/ \
- u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */ \
- u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */ \
- u16 flush_size; /* Number of flush strides that need to be read */ \
- u16 flush_stride; /* Processor's memory cache line width, in bytes */ \
- u8 duty_offset; /* Processor's duty cycle index in processor's P_CNT reg*/ \
- u8 duty_width; /* Processor's duty cycle value bit width in P_CNT register.*/ \
- u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \
- u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \
- u8 century; /* Index to century in RTC CMOS RAM */ \
- u16 iapc_boot_arch; /* IA-PC Boot Architecture Flags. See Table 5-10 for description*/
-
-/*
- * ACPI 2.0+ Fixed ACPI Description Table (FADT)
- */
-struct fadt_descriptor_rev2 {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- FADT_REV2_COMMON u8 reserved2; /* Reserved, must be zero */
-
- /* Flags (32 bits) */
-
- u8 wb_invd:1; /* 00: The wbinvd instruction works properly */
- u8 wb_invd_flush:1; /* 01: The wbinvd flushes but does not invalidate */
- u8 proc_c1:1; /* 02: All processors support C1 state */
- u8 plvl2_up:1; /* 03: C2 state works on MP system */
- u8 pwr_button:1; /* 04: Power button is handled as a generic feature */
- u8 sleep_button:1; /* 05: Sleep button is handled as a generic feature, or not present */
- u8 fixed_rTC:1; /* 06: RTC wakeup stat not in fixed register space */
- u8 rtcs4:1; /* 07: RTC wakeup stat not possible from S4 */
- u8 tmr_val_ext:1; /* 08: tmr_val is 32 bits 0=24-bits */
- u8 dock_cap:1; /* 09: Docking supported */
- u8 reset_reg_sup:1; /* 10: System reset via the FADT RESET_REG supported */
- u8 sealed_case:1; /* 11: No internal expansion capabilities and case is sealed */
- u8 headless:1; /* 12: No local video capabilities or local input devices */
- u8 cpu_sw_sleep:1; /* 13: Must execute native instruction after writing SLP_TYPx register */
-
- u8 pci_exp_wak:1; /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
- u8 use_platform_clock:1; /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
- u8 S4rtc_sts_valid:1; /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
- u8 remote_power_on_capable:1; /* 17: System is compatible with remote power on (ACPI 3.0) */
- u8 force_apic_cluster_model:1; /* 18: All local APICs must use cluster model (ACPI 3.0) */
- u8 force_apic_physical_destination_mode:1; /* 19: all local x_aPICs must use physical dest mode (ACPI 3.0) */
- u8:4; /* 20-23: Reserved, must be zero */
- u8 reserved3; /* 24-31: Reserved, must be zero */
-
- struct acpi_generic_address reset_register; /* Reset register address in GAS format */
- u8 reset_value; /* Value to write to the reset_register port to reset the system */
- u8 reserved4[3]; /* These three bytes must be zero */
- u64 xfirmware_ctrl; /* 64-bit physical address of FACS */
- u64 Xdsdt; /* 64-bit physical address of DSDT */
- struct acpi_generic_address xpm1a_evt_blk; /* Extended Power Mgt 1a acpi_event Reg Blk address */
- struct acpi_generic_address xpm1b_evt_blk; /* Extended Power Mgt 1b acpi_event Reg Blk address */
- struct acpi_generic_address xpm1a_cnt_blk; /* Extended Power Mgt 1a Control Reg Blk address */
- struct acpi_generic_address xpm1b_cnt_blk; /* Extended Power Mgt 1b Control Reg Blk address */
- struct acpi_generic_address xpm2_cnt_blk; /* Extended Power Mgt 2 Control Reg Blk address */
- struct acpi_generic_address xpm_tmr_blk; /* Extended Power Mgt Timer Ctrl Reg Blk address */
- struct acpi_generic_address xgpe0_blk; /* Extended General Purpose acpi_event 0 Reg Blk address */
- struct acpi_generic_address xgpe1_blk; /* Extended General Purpose acpi_event 1 Reg Blk address */
-};
-
-/* "Down-revved" ACPI 2.0 FADT descriptor */
-
-struct fadt_descriptor_rev2_minus {
- ACPI_TABLE_HEADER_DEF /* ACPI common table header */
- FADT_REV2_COMMON u8 reserved2; /* Reserved, must be zero */
- u32 flags;
- struct acpi_generic_address reset_register; /* Reset register address in GAS format */
- u8 reset_value; /* Value to write to the reset_register port to reset the system. */
- u8 reserved7[3]; /* Reserved, must be zero */
-};
-
-/* ECDT - Embedded Controller Boot Resources Table */
-
-struct ec_boot_resources {
- ACPI_TABLE_HEADER_DEF struct acpi_generic_address ec_control; /* Address of EC command/status register */
- struct acpi_generic_address ec_data; /* Address of EC data register */
- u32 uid; /* Unique ID - must be same as the EC _UID method */
- u8 gpe_bit; /* The GPE for the EC */
- u8 ec_id[1]; /* Full namepath of the EC in the ACPI namespace */
-};
-
-/* SRAT - System Resource Affinity Table */
-
-struct static_resource_alloc {
- u8 type;
- u8 length;
- u8 proximity_domain_lo;
- u8 apic_id;
-
- /* Flags (32 bits) */
-
- u8 enabled:1; /* 00: Use affinity structure */
- u8:7; /* 01-07: Reserved, must be zero */
- u8 reserved3[3]; /* 08-31: Reserved, must be zero */
-
- u8 local_sapic_eid;
- u8 proximity_domain_hi[3];
- u32 reserved4; /* Reserved, must be zero */
-};
-
-struct memory_affinity {
- u8 type;
- u8 length;
- u32 proximity_domain;
- u16 reserved3;
- u64 base_address;
- u64 address_length;
- u32 reserved4;
-
- /* Flags (32 bits) */
-
- u8 enabled:1; /* 00: Use affinity structure */
- u8 hot_pluggable:1; /* 01: Memory region is hot pluggable */
- u8 non_volatile:1; /* 02: Memory is non-volatile */
- u8:5; /* 03-07: Reserved, must be zero */
- u8 reserved5[3]; /* 08-31: Reserved, must be zero */
-
- u64 reserved6; /* Reserved, must be zero */
-};
-
-struct system_resource_affinity {
- ACPI_TABLE_HEADER_DEF u32 reserved1; /* Must be value '1' */
- u64 reserved2; /* Reserved, must be zero */
-};
-
-/* SLIT - System Locality Distance Information Table */
-
-struct system_locality_info {
- ACPI_TABLE_HEADER_DEF u64 locality_count;
- u8 entry[1][1];
-};
-
-#pragma pack()
+/* Code moved to both actbl.h and actbl1.h */
#endif /* __ACTBL2_H__ */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 7ca89cde706..77cf1236b05 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -44,6 +44,8 @@
#ifndef __ACTYPES_H__
#define __ACTYPES_H__
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
/*
* ACPI_MACHINE_WIDTH must be specified in an OS- or compiler-dependent header
* and must be either 16, 32, or 64
@@ -154,7 +156,6 @@ typedef u64 acpi_physical_address;
#define ACPI_MAX_PTR ACPI_UINT64_MAX
#define ACPI_SIZE_MAX ACPI_UINT64_MAX
-#define ALIGNED_ADDRESS_BOUNDARY 0x00000008
#define ACPI_USE_NATIVE_DIVIDE /* Has native 64-bit integer support */
/*
@@ -195,8 +196,6 @@ typedef u64 acpi_physical_address;
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX
-#define ALIGNED_ADDRESS_BOUNDARY 0x00000004
-
/*******************************************************************************
*
* Types specific to 16-bit targets
@@ -223,7 +222,6 @@ typedef char *acpi_physical_address;
#define ACPI_MAX_PTR ACPI_UINT16_MAX
#define ACPI_SIZE_MAX ACPI_UINT16_MAX
-#define ALIGNED_ADDRESS_BOUNDARY 0x00000002
#define ACPI_USE_NATIVE_DIVIDE /* No 64-bit integers, ok to use native divide */
/* 64-bit integers cannot be supported */
@@ -254,7 +252,7 @@ typedef acpi_native_uint acpi_size;
/* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */
#ifndef acpi_uintptr_t
-#define acpi_uintptr_t void *
+#define acpi_uintptr_t void *
#endif
/*
@@ -263,7 +261,7 @@ typedef acpi_native_uint acpi_size;
* manager implementation is to be used (ACPI_USE_LOCAL_CACHE)
*/
#ifndef acpi_cache_t
-#define acpi_cache_t struct acpi_memory_list
+#define acpi_cache_t struct acpi_memory_list
#endif
/*
@@ -271,7 +269,7 @@ typedef acpi_native_uint acpi_size;
* lock and unlock OSL interfaces.
*/
#ifndef acpi_cpu_flags
-#define acpi_cpu_flags acpi_native_uint
+#define acpi_cpu_flags acpi_native_uint
#endif
/*
@@ -292,6 +290,21 @@ typedef acpi_native_uint acpi_size;
#define ACPI_UNUSED_VAR
#endif
+/*
+ * All ACPICA functions that are available to the rest of the kernel are
+ * tagged with this macro which can be defined as appropriate for the host.
+ */
+#ifndef ACPI_EXPORT_SYMBOL
+#define ACPI_EXPORT_SYMBOL(symbol)
+#endif
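
A host header can map this hook onto its own export mechanism before the empty default above applies; the Linux side of this same patch (aclinux.h, further down) does exactly that, and an exported ACPICA interface is then tagged at its definition site. Roughly (the function name is illustrative):

/* Host definition, from aclinux.h in this patch */
#define ACPI_EXPORT_SYMBOL(symbol)	EXPORT_SYMBOL(symbol);

/* Tagging an exported interface */
acpi_status acpi_example_interface(void)
{
	return AE_OK;
}
ACPI_EXPORT_SYMBOL(acpi_example_interface)
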
+
+/*
+ * thread_id is returned by acpi_os_get_thread_id.
+ */
+#ifndef acpi_thread_id
+#define acpi_thread_id acpi_native_uint
+#endif
+
/*******************************************************************************
*
* Independent types
@@ -477,15 +490,15 @@ typedef u64 acpi_integer;
*/
typedef u32 acpi_table_type;
-#define ACPI_TABLE_RSDP (acpi_table_type) 0
-#define ACPI_TABLE_DSDT (acpi_table_type) 1
-#define ACPI_TABLE_FADT (acpi_table_type) 2
-#define ACPI_TABLE_FACS (acpi_table_type) 3
-#define ACPI_TABLE_PSDT (acpi_table_type) 4
-#define ACPI_TABLE_SSDT (acpi_table_type) 5
-#define ACPI_TABLE_XSDT (acpi_table_type) 6
-#define ACPI_TABLE_MAX 6
-#define NUM_ACPI_TABLE_TYPES (ACPI_TABLE_MAX+1)
+#define ACPI_TABLE_ID_RSDP (acpi_table_type) 0
+#define ACPI_TABLE_ID_DSDT (acpi_table_type) 1
+#define ACPI_TABLE_ID_FADT (acpi_table_type) 2
+#define ACPI_TABLE_ID_FACS (acpi_table_type) 3
+#define ACPI_TABLE_ID_PSDT (acpi_table_type) 4
+#define ACPI_TABLE_ID_SSDT (acpi_table_type) 5
+#define ACPI_TABLE_ID_XSDT (acpi_table_type) 6
+#define ACPI_TABLE_ID_MAX 6
+#define ACPI_NUM_TABLE_TYPES (ACPI_TABLE_ID_MAX+1)
/*
* Types associated with ACPI names and objects. The first group of
@@ -816,7 +829,7 @@ struct acpi_system_info {
u32 debug_level;
u32 debug_layer;
u32 num_table_types;
- struct acpi_table_info table_info[NUM_ACPI_TABLE_TYPES];
+ struct acpi_table_info table_info[ACPI_TABLE_ID_MAX + 1];
};
/*
@@ -858,7 +871,7 @@ acpi_status(*acpi_adr_space_handler) (u32 function,
void *handler_context,
void *region_context);
-#define ACPI_DEFAULT_HANDLER NULL
+#define ACPI_DEFAULT_HANDLER NULL
typedef
acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
@@ -911,12 +924,13 @@ struct acpi_compatible_id_list {
#define ACPI_STA_DEVICE_PRESENT 0x01
#define ACPI_STA_DEVICE_ENABLED 0x02
#define ACPI_STA_DEVICE_UI 0x04
-#define ACPI_STA_DEVICE_OK 0x08
+#define ACPI_STA_DEVICE_FUNCTIONING 0x08
+#define ACPI_STA_DEVICE_OK 0x08 /* Synonym */
#define ACPI_STA_BATTERY_PRESENT 0x10
#define ACPI_COMMON_OBJ_INFO \
- acpi_object_type type; /* ACPI object type */ \
- acpi_name name /* ACPI object Name */
+ acpi_object_type type; /* ACPI object type */ \
+ acpi_name name /* ACPI object Name */
struct acpi_obj_info_header {
ACPI_COMMON_OBJ_INFO;
@@ -957,7 +971,7 @@ struct acpi_mem_space_context {
* Definitions for Resource Attributes
*/
typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */
-typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (length+3) = (64_k-1)+3 */
+typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
/*
* Memory Attributes
@@ -972,8 +986,8 @@ typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (length+3) = (6
/*
* IO Attributes
- * The ISA Io ranges are: n000-n0_ffh, n400-n4_ffh, n800-n8_ffh, n_c00-n_cFFh.
- * The non-ISA Io ranges are: n100-n3_ffh, n500-n7_ffh, n900-n_bFfh, n_cd0-n_fFFh.
+ * The ISA IO ranges are: n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
+ * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
*/
#define ACPI_NON_ISA_ONLY_RANGES (u8) 0x01
#define ACPI_ISA_ONLY_RANGES (u8) 0x02
@@ -1171,12 +1185,12 @@ struct acpi_resource_source {
/* Fields common to all address descriptors, 16/32/64 bit */
#define ACPI_RESOURCE_ADDRESS_COMMON \
- u8 resource_type; \
- u8 producer_consumer; \
- u8 decode; \
- u8 min_address_fixed; \
- u8 max_address_fixed; \
- union acpi_resource_attribute info;
+ u8 resource_type; \
+ u8 producer_consumer; \
+ u8 decode; \
+ u8 min_address_fixed; \
+ u8 max_address_fixed; \
+ union acpi_resource_attribute info;
struct acpi_resource_address {
ACPI_RESOURCE_ADDRESS_COMMON};
@@ -1297,16 +1311,6 @@ struct acpi_resource {
#define ACPI_NEXT_RESOURCE(res) (struct acpi_resource *)((u8 *) res + res->length)
-#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-#define ACPI_ALIGN_RESOURCE_SIZE(length) (length)
-#else
-#define ACPI_ALIGN_RESOURCE_SIZE(length) ACPI_ROUND_UP_TO_NATIVE_WORD(length)
-#endif
-
-/*
- * END: of definitions for Resource Attributes
- */
-
struct acpi_pci_routing_table {
u32 length;
u32 pin;
@@ -1315,8 +1319,4 @@ struct acpi_pci_routing_table {
char source[4]; /* pad to 64 bits so sizeof() works in all cases */
};
-/*
- * END: of definitions for PCI Routing tables
- */
-
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/acutils.h b/include/acpi/acutils.h
index 0927765df6a..ba039ea1a05 100644
--- a/include/acpi/acutils.h
+++ b/include/acpi/acutils.h
@@ -50,24 +50,24 @@ extern const u8 acpi_gbl_resource_aml_sizes[];
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
-extern const char *acpi_gbl_BMdecode[2];
-extern const char *acpi_gbl_config_decode[4];
-extern const char *acpi_gbl_consume_decode[2];
-extern const char *acpi_gbl_DECdecode[2];
-extern const char *acpi_gbl_HEdecode[2];
-extern const char *acpi_gbl_io_decode[2];
-extern const char *acpi_gbl_LLdecode[2];
-extern const char *acpi_gbl_max_decode[2];
-extern const char *acpi_gbl_MEMdecode[4];
-extern const char *acpi_gbl_min_decode[2];
-extern const char *acpi_gbl_MTPdecode[4];
-extern const char *acpi_gbl_RNGdecode[4];
-extern const char *acpi_gbl_RWdecode[2];
-extern const char *acpi_gbl_SHRdecode[2];
-extern const char *acpi_gbl_SIZdecode[4];
-extern const char *acpi_gbl_TRSdecode[2];
-extern const char *acpi_gbl_TTPdecode[2];
-extern const char *acpi_gbl_TYPdecode[4];
+extern const char *acpi_gbl_bm_decode[];
+extern const char *acpi_gbl_config_decode[];
+extern const char *acpi_gbl_consume_decode[];
+extern const char *acpi_gbl_dec_decode[];
+extern const char *acpi_gbl_he_decode[];
+extern const char *acpi_gbl_io_decode[];
+extern const char *acpi_gbl_ll_decode[];
+extern const char *acpi_gbl_max_decode[];
+extern const char *acpi_gbl_mem_decode[];
+extern const char *acpi_gbl_min_decode[];
+extern const char *acpi_gbl_mtp_decode[];
+extern const char *acpi_gbl_rng_decode[];
+extern const char *acpi_gbl_rw_decode[];
+extern const char *acpi_gbl_shr_decode[];
+extern const char *acpi_gbl_siz_decode[];
+extern const char *acpi_gbl_trs_decode[];
+extern const char *acpi_gbl_ttp_decode[];
+extern const char *acpi_gbl_typ_decode[];
#endif
/* Types for Resource descriptor entries */
@@ -78,6 +78,12 @@ extern const char *acpi_gbl_TYPdecode[4];
#define ACPI_SMALL_VARIABLE_LENGTH 3
typedef
+acpi_status(*acpi_walk_aml_callback) (u8 * aml,
+ u32 length,
+ u32 offset,
+ u8 resource_index, void **context);
+
+typedef
acpi_status(*acpi_pkg_callback) (u8 object_type,
union acpi_operand_object * source_object,
union acpi_generic_state * state,
@@ -277,6 +283,8 @@ acpi_ut_ptr_exit(u32 line_number,
void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id);
+void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display);
+
void acpi_ut_report_error(char *module_name, u32 line_number);
void acpi_ut_report_info(char *module_name, u32 line_number);
@@ -445,6 +453,8 @@ acpi_ut_short_divide(acpi_integer in_dividend,
/*
* utmisc
*/
+u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
+
acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
@@ -460,7 +470,9 @@ void acpi_ut_print_string(char *string, u8 max_length);
u8 acpi_ut_valid_acpi_name(u32 name);
-u8 acpi_ut_valid_acpi_character(char character);
+acpi_name acpi_ut_repair_name(acpi_name name);
+
+u8 acpi_ut_valid_acpi_char(char character, acpi_native_uint position);
acpi_status
acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
@@ -469,6 +481,25 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer);
#define ACPI_ANY_BASE 0
+u32 acpi_ut_dword_byte_swap(u32 value);
+
+void acpi_ut_set_integer_width(u8 revision);
+
+#ifdef ACPI_DEBUG_OUTPUT
+void
+acpi_ut_display_init_pathname(u8 type,
+ struct acpi_namespace_node *obj_handle,
+ char *path);
+#endif
+
+/*
+ * utresrc
+ */
+acpi_status
+acpi_ut_walk_aml_resources(u8 * aml,
+ acpi_size aml_length,
+ acpi_walk_aml_callback user_function, void **context);
+
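The walker above hands each descriptor to an acpi_walk_aml_callback. A minimal sketch of a conforming callback and its invocation, counting descriptors only, with aml_buf/aml_len standing in for a real resource buffer:

static acpi_status count_descriptor(u8 *aml, u32 length, u32 offset,
				    u8 resource_index, void **context)
{
	u32 *count = *context;	/* caller stored &counter in the context slot */

	(*count)++;
	return AE_OK;
}

/* u32 n = 0; void *ctx = &n;                                        */
/* acpi_ut_walk_aml_resources(aml_buf, aml_len, count_descriptor, &ctx); */
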
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
u32 acpi_ut_get_descriptor_length(void *aml);
@@ -483,20 +514,6 @@ acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc,
u8 ** end_tag);
-u8 acpi_ut_generate_checksum(u8 * buffer, u32 length);
-
-u32 acpi_ut_dword_byte_swap(u32 value);
-
-void acpi_ut_set_integer_width(u8 revision);
-
-#ifdef ACPI_DEBUG_OUTPUT
-void
-acpi_ut_display_init_pathname(u8 type,
- struct acpi_namespace_node *obj_handle,
- char *path);
-
-#endif
-
/*
* utmutex - mutex support
*/
@@ -523,14 +540,15 @@ acpi_ut_initialize_buffer(struct acpi_buffer *buffer,
void *acpi_ut_allocate(acpi_size size, u32 component, char *module, u32 line);
-void *acpi_ut_callocate(acpi_size size, u32 component, char *module, u32 line);
+void *acpi_ut_allocate_zeroed(acpi_size size,
+ u32 component, char *module, u32 line);
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
void *acpi_ut_allocate_and_track(acpi_size size,
u32 component, char *module, u32 line);
-void *acpi_ut_callocate_and_track(acpi_size size,
- u32 component, char *module, u32 line);
+void *acpi_ut_allocate_zeroed_and_track(acpi_size size,
+ u32 component, char *module, u32 line);
void
acpi_ut_free_and_track(void *address, u32 component, char *module, u32 line);
@@ -540,6 +558,11 @@ void acpi_ut_dump_allocation_info(void);
#endif /* ACPI_FUTURE_USAGE */
void acpi_ut_dump_allocations(u32 component, char *module);
+
+acpi_status
+acpi_ut_create_list(char *list_name,
+ u16 object_size, struct acpi_memory_list **return_cache);
+
#endif
#endif /* _ACUTILS_H */
diff --git a/include/acpi/amlcode.h b/include/acpi/amlcode.h
index 37964a59aef..cf18426a87b 100644
--- a/include/acpi/amlcode.h
+++ b/include/acpi/amlcode.h
@@ -180,8 +180,10 @@
#define AML_BANK_FIELD_OP (u16) 0x5b87
#define AML_DATA_REGION_OP (u16) 0x5b88 /* ACPI 2.0 */
-/* Bogus opcodes (they are actually two separate opcodes) */
-
+/*
+ * Combination opcodes (actually two one-byte opcodes)
+ * Used by the disassembler and i_aSL compiler
+ */
#define AML_LGREATEREQUAL_OP (u16) 0x9295
#define AML_LLESSEQUAL_OP (u16) 0x9294
#define AML_LNOTEQUAL_OP (u16) 0x9293
diff --git a/include/acpi/amlresrc.h b/include/acpi/amlresrc.h
index fb4735315ad..be03818af9d 100644
--- a/include/acpi/amlresrc.h
+++ b/include/acpi/amlresrc.h
@@ -42,39 +42,45 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+/* acpisrc:struct_defs -- for acpisrc conversion */
+
#ifndef __AMLRESRC_H
#define __AMLRESRC_H
-#define ASL_RESNAME_ADDRESS "_ADR"
-#define ASL_RESNAME_ALIGNMENT "_ALN"
-#define ASL_RESNAME_ADDRESSSPACE "_ASI"
-#define ASL_RESNAME_ACCESSSIZE "_ASZ"
-#define ASL_RESNAME_TYPESPECIFICATTRIBUTES "_ATT"
-#define ASL_RESNAME_BASEADDRESS "_BAS"
-#define ASL_RESNAME_BUSMASTER "_BM_" /* Master(1), Slave(0) */
-#define ASL_RESNAME_DECODE "_DEC"
-#define ASL_RESNAME_DMA "_DMA"
-#define ASL_RESNAME_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
-#define ASL_RESNAME_GRANULARITY "_GRA"
-#define ASL_RESNAME_INTERRUPT "_INT"
-#define ASL_RESNAME_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
-#define ASL_RESNAME_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
-#define ASL_RESNAME_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
-#define ASL_RESNAME_LENGTH "_LEN"
-#define ASL_RESNAME_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
-#define ASL_RESNAME_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
-#define ASL_RESNAME_MAXADDR "_MAX"
-#define ASL_RESNAME_MINADDR "_MIN"
-#define ASL_RESNAME_MAXTYPE "_MAF"
-#define ASL_RESNAME_MINTYPE "_MIF"
-#define ASL_RESNAME_REGISTERBITOFFSET "_RBO"
-#define ASL_RESNAME_REGISTERBITWIDTH "_RBW"
-#define ASL_RESNAME_RANGETYPE "_RNG"
-#define ASL_RESNAME_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
-#define ASL_RESNAME_TRANSLATION "_TRA"
-#define ASL_RESNAME_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
-#define ASL_RESNAME_TYPE "_TTP" /* Translation(1), Static (0) */
-#define ASL_RESNAME_XFERTYPE "_SIz" /* 8(0), 8_and16(1), 16(2) */
+/*
+ * Resource descriptor tags, as defined in the ACPI specification.
+ * Used to symbolically reference fields within a descriptor.
+ */
+#define ACPI_RESTAG_ADDRESS "_ADR"
+#define ACPI_RESTAG_ALIGNMENT "_ALN"
+#define ACPI_RESTAG_ADDRESSSPACE "_ASI"
+#define ACPI_RESTAG_ACCESSSIZE "_ASZ"
+#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
+#define ACPI_RESTAG_BASEADDRESS "_BAS"
+#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DECODE "_DEC"
+#define ACPI_RESTAG_DMA "_DMA"
+#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_GRANULARITY "_GRA"
+#define ACPI_RESTAG_INTERRUPT "_INT"
+#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
+#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
+#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_LENGTH "_LEN"
+#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
+#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1), Cache+combine(2), Cache+prefetch(3) */
+#define ACPI_RESTAG_MAXADDR "_MAX"
+#define ACPI_RESTAG_MINADDR "_MIN"
+#define ACPI_RESTAG_MAXTYPE "_MAF"
+#define ACPI_RESTAG_MINTYPE "_MIF"
+#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
+#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
+#define ACPI_RESTAG_RANGETYPE "_RNG"
+#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_TRANSLATION "_TRA"
+#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
+#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
+#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
/* Default sizes for "small" resource descriptors */
@@ -109,7 +115,7 @@ struct asl_resource_node {
* SMALL descriptors
*/
#define AML_RESOURCE_SMALL_HEADER_COMMON \
- u8 descriptor_type;
+ u8 descriptor_type;
struct aml_resource_small_header {
AML_RESOURCE_SMALL_HEADER_COMMON};
@@ -162,8 +168,8 @@ struct aml_resource_end_tag {
* LARGE descriptors
*/
#define AML_RESOURCE_LARGE_HEADER_COMMON \
- u8 descriptor_type;\
- u16 resource_length;
+ u8 descriptor_type;\
+ u16 resource_length;
struct aml_resource_large_header {
AML_RESOURCE_LARGE_HEADER_COMMON};
@@ -194,9 +200,9 @@ struct aml_resource_fixed_memory32 {
};
#define AML_RESOURCE_ADDRESS_COMMON \
- u8 resource_type; \
- u8 flags; \
- u8 specific_flags;
+ u8 resource_type; \
+ u8 flags; \
+ u8 specific_flags;
struct aml_resource_address {
AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_ADDRESS_COMMON};
@@ -266,6 +272,7 @@ struct aml_resource_generic_register {
union aml_resource {
/* Descriptor headers */
+ u8 descriptor_type;
struct aml_resource_small_header small_header;
struct aml_resource_large_header large_header;
@@ -296,9 +303,9 @@ union aml_resource {
/* Utility overlays */
struct aml_resource_address address;
- u32 u32_item;
- u16 u16_item;
- u8 U8item;
+ u32 dword_item;
+ u16 word_item;
+ u8 byte_item;
};
#endif
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
index 3fa81d55cd0..c5472be6f3a 100644
--- a/include/acpi/pdc_intel.h
+++ b/include/acpi/pdc_intel.h
@@ -18,6 +18,11 @@
ACPI_PDC_C_C1_HALT | \
ACPI_PDC_P_FFH)
+#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
+ ACPI_PDC_C_C1_HALT | \
+ ACPI_PDC_SMP_P_SWCOORD | \
+ ACPI_PDC_P_FFH)
+
#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
ACPI_PDC_SMP_C1PT | \
ACPI_PDC_C_C1_HALT)
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 223ec646710..453a469fd39 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -49,33 +49,41 @@
*/
#ifdef ACPI_LIBRARY
+/*
+ * Note: The non-debug version of the acpi_library does not contain any
+ * debug support, for minimal size. The debug version uses ACPI_FULL_DEBUG
+ */
#define ACPI_USE_LOCAL_CACHE
#endif
-#ifdef ACPI_DUMP_APP
-#ifndef MSDOS
+#ifdef ACPI_ASL_COMPILER
#define ACPI_DEBUG_OUTPUT
-#endif
#define ACPI_APPLICATION
#define ACPI_DISASSEMBLER
-#define ACPI_NO_METHOD_EXECUTION
+#define ACPI_CONSTANT_EVAL_ONLY
+#define ACPI_LARGE_NAMESPACE_NODE
+#define ACPI_DATA_TABLE_DISASSEMBLY
#endif
#ifdef ACPI_EXEC_APP
#undef DEBUGGER_THREADING
#define DEBUGGER_THREADING DEBUGGER_SINGLE_THREADED
-#define ACPI_DEBUG_OUTPUT
+#define ACPI_FULL_DEBUG
#define ACPI_APPLICATION
#define ACPI_DEBUGGER
-#define ACPI_DISASSEMBLER
#define ACPI_MUTEX_DEBUG
+#define ACPI_DBG_TRACK_ALLOCATIONS
#endif
-#ifdef ACPI_ASL_COMPILER
+#ifdef ACPI_DASM_APP
+#ifndef MSDOS
#define ACPI_DEBUG_OUTPUT
+#endif
#define ACPI_APPLICATION
#define ACPI_DISASSEMBLER
-#define ACPI_CONSTANT_EVAL_ONLY
+#define ACPI_NO_METHOD_EXECUTION
+#define ACPI_LARGE_NAMESPACE_NODE
+#define ACPI_DATA_TABLE_DISASSEMBLY
#endif
#ifdef ACPI_APPLICATION
@@ -83,6 +91,12 @@
#define ACPI_USE_LOCAL_CACHE
#endif
+#ifdef ACPI_FULL_DEBUG
+#define ACPI_DEBUGGER
+#define ACPI_DEBUG_OUTPUT
+#define ACPI_DISASSEMBLER
+#endif
+
/*
* Environment configuration. The purpose of this file is to interface to the
* local generation environment.
@@ -137,7 +151,7 @@
#elif defined(MSDOS) /* Must appear after WIN32 and WIN64 check */
#include "acdos16.h"
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include "acfreebsd.h"
#elif defined(__NetBSD__)
@@ -163,17 +177,6 @@
#endif
-/*
- * Memory allocation tracking. Used only if
- * 1) This is the debug version
- * 2) This is NOT a 16-bit version of the code (not enough real-mode memory)
- */
-#ifdef ACPI_DEBUG_OUTPUT
-#if ACPI_MACHINE_WIDTH != 16
-#define ACPI_DBG_TRACK_ALLOCATIONS
-#endif
-#endif
-
/*! [End] no source code translation !*/
/*
@@ -271,8 +274,8 @@ typedef char *va_list;
/*
* Storage alignment properties
*/
-#define _AUPBND (sizeof (acpi_native_uint) - 1)
-#define _ADNBND (sizeof (acpi_native_uint) - 1)
+#define _AUPBND (sizeof (acpi_native_int) - 1)
+#define _ADNBND (sizeof (acpi_native_int) - 1)
/*
* Variable argument list macro definitions
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 3c6a6205853..277d35bced0 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -51,27 +51,22 @@
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/ctype.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/div64.h>
#include <asm/acpi.h>
+#include <linux/slab.h>
-#define strtoul simple_strtoul
-
-#define ACPI_MACHINE_WIDTH BITS_PER_LONG
+/* Host-dependent types and defines */
-/* Type(s) for the OSL */
-
-#ifdef ACPI_USE_LOCAL_CACHE
-#define acpi_cache_t struct acpi_memory_list
-#else
-#include <linux/slab.h>
-#define acpi_cache_t kmem_cache_t
-#endif
+#define ACPI_MACHINE_WIDTH BITS_PER_LONG
+#define acpi_cache_t kmem_cache_t
+#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
+#define strtoul simple_strtoul
/* Full namespace pathname length limit - arbitrary */
-
#define ACPI_PATHNAME_MAX 256
#else /* !__KERNEL__ */
@@ -103,4 +98,8 @@
#define acpi_cpu_flags unsigned long
+#define acpi_thread_id u32
+
+static inline acpi_thread_id acpi_os_get_thread_id(void) { return 0; }
+
#endif /* __ACLINUX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index ef7d83a4147..77371b3cdc4 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -2,6 +2,7 @@
#define __ACPI_PROCESSOR_H
#include <linux/kernel.h>
+#include <linux/cpu.h>
#include <asm/acpi.h>
@@ -17,6 +18,17 @@
#define ACPI_PDC_REVISION_ID 0x1
+#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
+#define ACPI_PSD_REV0_ENTRIES 5
+
+/*
+ * Types of coordination defined in ACPI 3.0. Same macros can be used across
+ * P, C and T states
+ */
+#define DOMAIN_COORD_TYPE_SW_ALL 0xfc
+#define DOMAIN_COORD_TYPE_SW_ANY 0xfd
+#define DOMAIN_COORD_TYPE_HW_ALL 0xfe
+
/* Power Management */
struct acpi_processor_cx;
@@ -65,6 +77,14 @@ struct acpi_processor_power {
/* Performance Management */
+struct acpi_psd_package {
+ acpi_integer num_entries;
+ acpi_integer revision;
+ acpi_integer domain;
+ acpi_integer coord_type;
+ acpi_integer num_processors;
+} __attribute__ ((packed));
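
The struct mirrors the five-integer package returned by _PSD; a consumer would normally validate it against the constants above before trusting the coordination data. A sketch, not the driver's actual check:

static int psd_package_valid(struct acpi_psd_package *p)
{
	return p->num_entries == ACPI_PSD_REV0_ENTRIES &&
	       p->revision == ACPI_PSD_REV0_REVISION &&
	       (p->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
		p->coord_type == DOMAIN_COORD_TYPE_SW_ANY ||
		p->coord_type == DOMAIN_COORD_TYPE_HW_ALL);
}
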
+
struct acpi_pct_register {
u8 descriptor;
u16 length;
@@ -91,7 +111,9 @@ struct acpi_processor_performance {
struct acpi_pct_register status_register;
unsigned int state_count;
struct acpi_processor_px *states;
-
+ struct acpi_psd_package domain_info;
+ cpumask_t shared_cpu_map;
+ unsigned int shared_type;
};
/* Throttling Control */
@@ -160,6 +182,9 @@ struct acpi_processor_errata {
} piix4;
};
+extern int acpi_processor_preregister_performance(
+ struct acpi_processor_performance **performance);
+
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
extern void acpi_processor_unregister_performance(struct
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
index f6de033718a..917b9fe372c 100644
--- a/include/asm-alpha/irq.h
+++ b/include/asm-alpha/irq.h
@@ -92,8 +92,4 @@ extern void enable_irq(unsigned int);
struct pt_regs;
extern void (*perf_irq)(unsigned long, struct pt_regs *);
-struct irqaction;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
-
#endif /* _ALPHA_IRQ_H */
diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h
index 60b5105c9c9..66e67e60bc5 100644
--- a/include/asm-arm/irq.h
+++ b/include/asm-arm/irq.h
@@ -47,10 +47,6 @@ void disable_irq_wake(unsigned int irq);
void enable_irq_wake(unsigned int irq);
int setup_irq(unsigned int, struct irqaction *);
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
extern void migrate_irqs(void);
#endif
diff --git a/include/asm-arm26/irq.h b/include/asm-arm26/irq.h
index 06bd5a543d1..9aaac87efba 100644
--- a/include/asm-arm26/irq.h
+++ b/include/asm-arm26/irq.h
@@ -44,9 +44,5 @@ extern void enable_irq(unsigned int);
int set_irq_type(unsigned int irq, unsigned int type);
-int setup_irq(unsigned int, struct irqaction *);
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 9a4ff03c396..066386ac238 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -227,7 +227,7 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
break; \
\
default: \
- __xg_orig = 0; \
+ __xg_orig = (__typeof__(__xg_orig))0; \
asm volatile("break"); \
break; \
} \
@@ -247,7 +247,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
switch (sizeof(__xg_orig)) { \
case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
default: \
- __xg_orig = 0; \
+ __xg_orig = (__typeof__(__xg_orig))0; \
asm volatile("break"); \
break; \
} \
diff --git a/include/asm-frv/checksum.h b/include/asm-frv/checksum.h
index 10236f6802d..42bf0db2287 100644
--- a/include/asm-frv/checksum.h
+++ b/include/asm-frv/checksum.h
@@ -43,7 +43,7 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
+extern unsigned int csum_partial_copy_from_user(const char __user *src, char *dst,
int len, int sum, int *csum_err);
#define csum_partial_copy_nocheck(src, dst, len, sum) \
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index cfbf7d3a1fe..e2247c22a63 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -134,7 +134,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
default:
BUG();
- return 0;
+ return NULL;
}
}
diff --git a/include/asm-frv/io.h b/include/asm-frv/io.h
index b56eba59e3c..7765f552889 100644
--- a/include/asm-frv/io.h
+++ b/include/asm-frv/io.h
@@ -40,13 +40,13 @@ static inline unsigned long _swapl(unsigned long v)
//#define __iormb() asm volatile("membar")
//#define __iowmb() asm volatile("membar")
-#define __raw_readb(addr) __builtin_read8((void *) (addr))
-#define __raw_readw(addr) __builtin_read16((void *) (addr))
-#define __raw_readl(addr) __builtin_read32((void *) (addr))
+#define __raw_readb __builtin_read8
+#define __raw_readw __builtin_read16
+#define __raw_readl __builtin_read32
-#define __raw_writeb(datum, addr) __builtin_write8((void *) (addr), datum)
-#define __raw_writew(datum, addr) __builtin_write16((void *) (addr), datum)
-#define __raw_writel(datum, addr) __builtin_write32((void *) (addr), datum)
+#define __raw_writeb(datum, addr) __builtin_write8(addr, datum)
+#define __raw_writew(datum, addr) __builtin_write16(addr, datum)
+#define __raw_writel(datum, addr) __builtin_write32(addr, datum)
static inline void io_outsb(unsigned int addr, const void *buf, int len)
{
@@ -116,7 +116,7 @@ static inline void memset_io(volatile void __iomem *addr, unsigned char val, int
memset((void __force *) addr, val, count);
}
-static inline void memcpy_fromio(void *dst, volatile void __iomem *src, int count)
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
memcpy(dst, (void __force *) src, count);
}
@@ -128,12 +128,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
static inline uint8_t inb(unsigned long addr)
{
- return __builtin_read8((void *)addr);
+ return __builtin_read8((void __iomem *)addr);
}
static inline uint16_t inw(unsigned long addr)
{
- uint16_t ret = __builtin_read16((void *)addr);
+ uint16_t ret = __builtin_read16((void __iomem *)addr);
if (__is_PCI_IO(addr))
ret = _swapw(ret);
@@ -143,7 +143,7 @@ static inline uint16_t inw(unsigned long addr)
static inline uint32_t inl(unsigned long addr)
{
- uint32_t ret = __builtin_read32((void *)addr);
+ uint32_t ret = __builtin_read32((void __iomem *)addr);
if (__is_PCI_IO(addr))
ret = _swapl(ret);
@@ -153,21 +153,21 @@ static inline uint32_t inl(unsigned long addr)
static inline void outb(uint8_t datum, unsigned long addr)
{
- __builtin_write8((void *)addr, datum);
+ __builtin_write8((void __iomem *)addr, datum);
}
static inline void outw(uint16_t datum, unsigned long addr)
{
if (__is_PCI_IO(addr))
datum = _swapw(datum);
- __builtin_write16((void *)addr, datum);
+ __builtin_write16((void __iomem *)addr, datum);
}
static inline void outl(uint32_t datum, unsigned long addr)
{
if (__is_PCI_IO(addr))
datum = _swapl(datum);
- __builtin_write32((void *)addr, datum);
+ __builtin_write32((void __iomem *)addr, datum);
}
#define inb_p(addr) inb(addr)
@@ -189,12 +189,12 @@ static inline void outl(uint32_t datum, unsigned long addr)
static inline uint8_t readb(const volatile void __iomem *addr)
{
- return __builtin_read8((volatile uint8_t __force *) addr);
+ return __builtin_read8((__force void volatile __iomem *) addr);
}
static inline uint16_t readw(const volatile void __iomem *addr)
{
- uint16_t ret = __builtin_read16((volatile uint16_t __force *)addr);
+ uint16_t ret = __builtin_read16((__force void volatile __iomem *)addr);
if (__is_PCI_MEM(addr))
ret = _swapw(ret);
@@ -203,7 +203,7 @@ static inline uint16_t readw(const volatile void __iomem *addr)
static inline uint32_t readl(const volatile void __iomem *addr)
{
- uint32_t ret = __builtin_read32((volatile uint32_t __force *)addr);
+ uint32_t ret = __builtin_read32((__force void volatile __iomem *)addr);
if (__is_PCI_MEM(addr))
ret = _swapl(ret);
@@ -217,7 +217,7 @@ static inline uint32_t readl(const volatile void __iomem *addr)
static inline void writeb(uint8_t datum, volatile void __iomem *addr)
{
- __builtin_write8((volatile uint8_t __force *) addr, datum);
+ __builtin_write8(addr, datum);
if (__is_PCI_MEM(addr))
__flush_PCI_writes();
}
@@ -227,7 +227,7 @@ static inline void writew(uint16_t datum, volatile void __iomem *addr)
if (__is_PCI_MEM(addr))
datum = _swapw(datum);
- __builtin_write16((volatile uint16_t __force *) addr, datum);
+ __builtin_write16(addr, datum);
if (__is_PCI_MEM(addr))
__flush_PCI_writes();
}
@@ -237,7 +237,7 @@ static inline void writel(uint32_t datum, volatile void __iomem *addr)
if (__is_PCI_MEM(addr))
datum = _swapl(datum);
- __builtin_write32((volatile uint32_t __force *) addr, datum);
+ __builtin_write32(addr, datum);
if (__is_PCI_MEM(addr))
__flush_PCI_writes();
}
@@ -271,7 +271,7 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned l
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}
-extern void iounmap(void __iomem *addr);
+extern void iounmap(void volatile __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
diff --git a/include/asm-frv/mb-regs.h b/include/asm-frv/mb-regs.h
index 93fa732fb0c..219e5f926f1 100644
--- a/include/asm-frv/mb-regs.h
+++ b/include/asm-frv/mb-regs.h
@@ -16,6 +16,17 @@
#include <asm/sections.h>
#include <asm/mem-layout.h>
+#ifndef __ASSEMBLY__
+/* gcc builtins, annotated */
+
+unsigned long __builtin_read8(volatile void __iomem *);
+unsigned long __builtin_read16(volatile void __iomem *);
+unsigned long __builtin_read32(volatile void __iomem *);
+void __builtin_write8(volatile void __iomem *, unsigned char);
+void __builtin_write16(volatile void __iomem *, unsigned short);
+void __builtin_write32(volatile void __iomem *, unsigned long);
+#endif
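
Declaring the builtins with __iomem parameters lets sparse flag address-space mix-ups at the call sites converted throughout this patch. An illustrative sketch (the physical address and length are placeholders):

static void iomem_annotation_example(void)
{
	void __iomem *regs = ioremap(0xf0000000UL, 0x100);	/* illustrative mapping */
	unsigned long val = __builtin_read32(regs);		/* ok: __iomem pointer */

	/* __builtin_write32(&val, 0); -- would now draw a sparse address-space warning */
	__builtin_write32(regs, val);
	iounmap(regs);
}
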
+
#define __region_IO KERNEL_IO_START /* the region from 0xe0000000 to 0xffffffff has suitable
* protection laid over the top for use in memory-mapped
* I/O
@@ -59,7 +70,7 @@
#define __region_PCI_MEM (__region_CS2 + 0x08000000UL)
#define __flush_PCI_writes() \
do { \
- __builtin_write8((volatile void *) __region_PCI_MEM, 0); \
+ __builtin_write8((volatile void __iomem *) __region_PCI_MEM, 0); \
} while(0)
#define __is_PCI_IO(addr) \
@@ -83,15 +94,15 @@ extern int __nongprelbss mb93090_mb00_detected;
#define __set_LEDS(X) \
do { \
if (mb93090_mb00_detected) \
- __builtin_write32((void *) __addr_LEDS(), ~(X)); \
+ __builtin_write32((void __iomem *) __addr_LEDS(), ~(X)); \
} while (0)
#else
#define __set_LEDS(X)
#endif
#define __addr_LCD() (__region_CS2 + 0x01200008UL)
-#define __get_LCD(B) __builtin_read32((volatile void *) (B))
-#define __set_LCD(B,X) __builtin_write32((volatile void *) (B), (X))
+#define __get_LCD(B) __builtin_read32((volatile void __iomem *) (B))
+#define __set_LCD(B,X) __builtin_write32((volatile void __iomem *) (B), (X))
#define LCD_D 0x000000ff /* LCD data bus */
#define LCD_RW 0x00000100 /* LCD R/W signal */
@@ -161,11 +172,11 @@ do { \
#define __get_CLKIN() 66000000UL
#define __addr_LEDS() (__region_CS2 + 0x00000023UL)
-#define __set_LEDS(X) __builtin_write8((volatile void *) __addr_LEDS(), (X))
+#define __set_LEDS(X) __builtin_write8((volatile void __iomem *) __addr_LEDS(), (X))
#define __addr_FPGATR() (__region_CS2 + 0x00000030UL)
-#define __set_FPGATR(X) __builtin_write32((volatile void *) __addr_FPGATR(), (X))
-#define __get_FPGATR() __builtin_read32((volatile void *) __addr_FPGATR())
+#define __set_FPGATR(X) __builtin_write32((volatile void __iomem *) __addr_FPGATR(), (X))
+#define __get_FPGATR() __builtin_read32((volatile void __iomem *) __addr_FPGATR())
#define MB93093_FPGA_FPGATR_AUDIO_CLK 0x00000003
@@ -180,7 +191,7 @@ do { \
#define MB93093_FPGA_SWR_PUSHSWMASK (0x1F<<26)
#define MB93093_FPGA_SWR_PUSHSW4 (1<<29)
-#define __addr_FPGA_SWR ((volatile void *)(__region_CS2 + 0x28UL))
+#define __addr_FPGA_SWR ((volatile void __iomem *)(__region_CS2 + 0x28UL))
#define __get_FPGA_PUSHSW1_5() (__builtin_read32(__addr_FPGA_SWR) & MB93093_FPGA_SWR_PUSHSWMASK)
diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h
index 67366894780..dcc1b359291 100644
--- a/include/asm-frv/signal.h
+++ b/include/asm-frv/signal.h
@@ -114,13 +114,13 @@ struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
- void (*sa_restorer)(void);
+ __sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
@@ -146,7 +146,7 @@ struct sigaction {
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void *ss_sp;
+ void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
diff --git a/include/asm-frv/uaccess.h b/include/asm-frv/uaccess.h
index a1d14043886..3d90e1018ee 100644
--- a/include/asm-frv/uaccess.h
+++ b/include/asm-frv/uaccess.h
@@ -22,7 +22,7 @@
#define HAVE_ARCH_UNMAPPED_AREA /* we decide where to put mmaps */
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __force *)(x))
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -64,7 +64,7 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
#define __range_ok(addr,size) ___range_ok((unsigned long) (addr), (unsigned long) (size))
-#define access_ok(type,addr,size) (__range_ok((addr), (size)) == 0)
+#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
/*
@@ -97,6 +97,7 @@ extern unsigned long search_exception_table(unsigned long);
int __pu_err = 0; \
\
typeof(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
\
switch (sizeof (*(ptr))) { \
case 1: \
@@ -120,7 +121,7 @@ extern unsigned long search_exception_table(unsigned long);
#define put_user(x, ptr) \
({ \
- typeof(&*ptr) _p = (ptr); \
+ typeof(*(ptr)) __user *_p = (ptr); \
int _e; \
\
_e = __range_ok(_p, sizeof(*_p)); \
@@ -175,33 +176,44 @@ do { \
*/
#define __get_user(x, ptr) \
({ \
- typeof(*(ptr)) __gu_val = 0; \
int __gu_err = 0; \
+ __chk_user_ptr(ptr); \
\
switch (sizeof(*(ptr))) { \
- case 1: \
- __get_user_asm(__gu_err, *(u8*)&__gu_val, ptr, "ub", "=r"); \
+ case 1: { \
+ unsigned char __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "ub", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
- case 2: \
- __get_user_asm(__gu_err, *(u16*)&__gu_val, ptr, "uh", "=r"); \
+ } \
+ case 2: { \
+ unsigned short __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "uh", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
- case 4: \
- __get_user_asm(__gu_err, *(u32*)&__gu_val, ptr, "", "=r"); \
+ } \
+ case 4: { \
+ unsigned int __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "", "=r"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
- case 8: \
- __get_user_asm(__gu_err, *(u64*)&__gu_val, ptr, "d", "=e"); \
+ } \
+ case 8: { \
+ unsigned long long __gu_val; \
+ __get_user_asm(__gu_err, __gu_val, ptr, "d", "=e"); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
break; \
+ } \
default: \
__gu_err = __get_user_bad(); \
break; \
} \
- (x) = __gu_val; \
__gu_err; \
})
#define get_user(x, ptr) \
({ \
- typeof(&*ptr) _p = (ptr); \
+ const typeof(*(ptr)) __user *_p = (ptr);\
int _e; \
\
_e = __range_ok(_p, sizeof(*_p)); \
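
Callers of the rewritten accessors are unchanged; the usual pattern looks like this sketch (the helper and its __user pointer are illustrative):

static long example_bump(int __user *uptr)
{
	int value;

	if (get_user(value, uptr))		/* range-checked read from userspace */
		return -EFAULT;
	if (put_user(value + 1, uptr))		/* range-checked write back */
		return -EFAULT;
	return 0;
}
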
@@ -248,19 +260,20 @@ do { \
/*
*
*/
+#define ____force(x) (__force void *)(void __user *)(x)
#ifdef CONFIG_MMU
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);
-#define clear_user(dst,count) __memset_user((dst), (count))
-#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), (from), (n))
-#define __copy_to_user_inatomic(to, from, n) __memcpy_user((to), (from), (n))
+#define clear_user(dst,count) __memset_user(____force(dst), (count))
+#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
+#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else
-#define clear_user(dst,count) (memset((dst), 0, (count)), 0)
-#define __copy_from_user_inatomic(to, from, n) (memcpy((to), (from), (n)), 0)
-#define __copy_to_user_inatomic(to, from, n) (memcpy((to), (from), (n)), 0)
+#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
+#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
+#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif
@@ -278,7 +291,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
return __copy_from_user_inatomic(to, from, n);
}
-static inline long copy_from_user(void *to, const void *from, unsigned long n)
+static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long ret = n;
@@ -291,16 +304,13 @@ static inline long copy_from_user(void *to, const void *from, unsigned long n)
return ret;
}
-static inline long copy_to_user(void *to, const void *from, unsigned long n)
+static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
{
return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
}
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
-
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern long strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
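
Illustrative sketch (not part of the patch): the __user annotations and __chk_user_ptr() calls added above exist so sparse can flag direct dereferences of user-space pointers. A hypothetical handler using the annotated get_user()/put_user() would look roughly like this; example_handler and its argument are invented for illustration:

    /* hypothetical example; assumes <asm/uaccess.h> is included */
    static int example_handler(unsigned long arg)
    {
        int __user *uptr = (int __user *)arg;   /* keep the __user tag */
        int val;

        if (get_user(val, uptr))                /* checked read from user space */
            return -EFAULT;
        val++;
        return put_user(val, uptr) ? -EFAULT : 0;
    }

Dereferencing uptr directly (*uptr) would now draw a sparse warning, which is exactly what the annotations are for.
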
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 7c2e712c3b7..b80dbd83947 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -306,7 +306,7 @@
#define __NR_mknodat 297
#define __NR_fchownat 298
#define __NR_futimesat 299
-#define __NR_newfstatat 300
+#define __NR_fstatat64 300
#define __NR_unlinkat 301
#define __NR_renameat 302
#define __NR_linkat 303
@@ -460,24 +460,7 @@ type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg
* some others too.
*/
#define __NR__exit __NR_exit
-static inline _syscall0(int,pause)
-static inline _syscall0(int,sync)
-static inline _syscall0(pid_t,setsid)
-static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
-static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
-static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
-static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
-static inline _syscall1(int,close,int,fd)
-static inline _syscall1(int,_exit,int,exitcode)
-static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
-static inline _syscall1(int,delete_module,const char *,name)
-
-static inline pid_t wait(int * wait_stat)
-{
- return waitpid(-1,wait_stat,0);
-}
#endif /* __KERNEL_SYSCALLS__ */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 0cfb086dd37..8078cbd2c01 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -23,29 +23,23 @@
#endif /* CONFIG_DISCONTIGMEM */
-#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-struct page;
-/* this is useful when inlined pfn_to_page is too big */
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
-#else
/*
* supports 3 memory models.
*/
#if defined(CONFIG_FLATMEM)
-#define pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
+#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)
-#define pfn_to_page(pfn) \
+#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
unsigned long __nid = arch_pfn_to_nid(pfn); \
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
-#define page_to_pfn(pg) \
+#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
(unsigned long)(__pg - __pgdat->node_mem_map) + \
@@ -57,18 +51,27 @@ extern unsigned long page_to_pfn(struct page *page);
* Note: section's mem_map is encoded to reflect its start_pfn.
* section[i].section_mem_map == mem_map's address - start_pfn;
*/
-#define page_to_pfn(pg) \
+#define __page_to_pfn(pg) \
({ struct page *__pg = (pg); \
int __sec = page_to_section(__pg); \
__pg - __section_mem_map_addr(__nr_to_section(__sec)); \
})
-#define pfn_to_page(pfn) \
+#define __pfn_to_page(pfn) \
({ unsigned long __pfn = (pfn); \
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+
+#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
+struct page;
+/* this is useful when inlined pfn_to_page is too big */
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+#else
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
#endif /* __ASSEMBLY__ */
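
A sketch of why the macros are renamed (not part of the patch): with __pfn_to_page()/__page_to_pfn() always defined, the CONFIG_OUT_OF_LINE_PFN_TO_PAGE case can supply real functions that merely wrap them, so the potentially large DISCONTIGMEM/SPARSEMEM lookup is emitted once instead of at every call site. The out-of-line bodies would plausibly look like:

    /* hypothetical out-of-line definitions built on the __ macros above */
    struct page *pfn_to_page(unsigned long pfn)
    {
        return __pfn_to_page(pfn);
    }
    unsigned long page_to_pfn(struct page *page)
    {
        return __page_to_pfn(page);
    }
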
diff --git a/include/asm-h8300/irq.h b/include/asm-h8300/irq.h
index 73065f5bda0..42a3ac424a9 100644
--- a/include/asm-h8300/irq.h
+++ b/include/asm-h8300/irq.h
@@ -63,8 +63,4 @@ extern void enable_irq(unsigned int);
extern void disable_irq(unsigned int);
#define disable_irq_nosync(x) disable_irq(x)
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* _H8300_IRQ_H_ */
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index e201decea0c..d79e9ee10fd 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -3,6 +3,8 @@
#ifdef __KERNEL__
+#include <asm/types.h>
+
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index cc9b940fb7e..1d8362cb2c5 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -138,8 +138,6 @@ void switch_ipi_to_APIC_timer(void *cpumask);
extern int timer_over_8254;
-extern int modern_apic(void);
-
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index 5e4a35af292..9f6995341fd 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -121,7 +121,6 @@
*/
#define u32 unsigned int
-#define lapic ((volatile struct local_apic *)APIC_BASE)
struct local_apic {
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index b44bfc6239c..3ecedbafa8c 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -88,6 +88,12 @@
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
+#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
@@ -121,6 +127,12 @@
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
+#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
+#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
+#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
+#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
+#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
+#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#endif /* __ASM_I386_CPUFEATURE_H */
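
Usage sketch (not part of the patch): each new PadLock feature bit is paired with an *_EN bit because the unit can be present yet left disabled by firmware, so a driver should test both. example_phe_probe is a made-up name:

    /* hypothetical probe for the PadLock Hash Engine */
    static int example_phe_probe(void)
    {
        if (!cpu_has_phe || !cpu_has_phe_enabled)
            return -ENODEV;     /* engine absent or not enabled */
        return 0;
    }
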
diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h
new file mode 100644
index 00000000000..7cc1a973bf0
--- /dev/null
+++ b/include/asm-i386/mce.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_MCE
+extern void mcheck_init(struct cpuinfo_x86 *c);
+#else
+#define mcheck_init(c) do {} while(0)
+#endif
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5a46de08efe..07f063ae26e 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -76,6 +76,8 @@ extern int mtrr_add_page (unsigned long base, unsigned long size,
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment)
@@ -100,6 +102,8 @@ static __inline__ int mtrr_del_page (int reg, unsigned long base,
static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
# endif
#endif
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 4df3818e412..0c83cf12eec 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -728,18 +728,4 @@ extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
-#ifdef CONFIG_MTRR
-extern void mtrr_ap_init(void);
-extern void mtrr_bp_init(void);
-#else
-#define mtrr_ap_init() do {} while (0)
-#define mtrr_bp_init() do {} while (0)
-#endif
-
-#ifdef CONFIG_X86_MCE
-extern void mcheck_init(struct cpuinfo_x86 *c);
-#else
-#define mcheck_init(c) do {} while(0)
-#endif
-
#endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 1ec65523ea5..8462f8e0e65 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -58,7 +58,7 @@ extern struct movsl_mask {
__chk_user_ptr(addr); \
asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
:"=&r" (flag), "=r" (sum) \
- :"1" (addr),"g" ((int)(size)),"g" (current_thread_info()->addr_limit.seg)); \
+ :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \
flag; })
/**
@@ -390,6 +390,8 @@ unsigned long __must_check __copy_to_user_ll(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll(void *to,
const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+ const void __user *from, unsigned long n);
/*
* Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
@@ -478,12 +480,43 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return __copy_from_user_ll(to, from, n);
}
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret, 1);
+ return ret;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret, 2);
+ return ret;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret, 4);
+ return ret;
+ }
+ }
+ return __copy_from_user_ll_nocache(to, from, n);
+}
+
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
return __copy_from_user_inatomic(to, from, n);
}
+
+static __always_inline unsigned long
+__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
+{
+ might_sleep();
+ return __copy_from_user_inatomic_nocache(to, from, n);
+}
+
unsigned long __must_check copy_to_user(void __user *to,
const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
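
Sketch of the intended use (not part of the patch): __copy_from_user_nocache(), available when ARCH_HAS_NOCACHE_UACCESS is defined as above, is meant for bulk copies whose destination will not be touched again soon, so filling the cache with it would be wasted. Like the other double-underscore variants it expects the caller to have done access_ok() first. example_fill below is an invented caller:

    /* hypothetical bulk copy into a kernel buffer that is consumed only once */
    static long example_fill(void *kbuf, const void __user *ubuf, unsigned long len)
    {
        if (!access_ok(VERIFY_READ, ubuf, len))
            return -EFAULT;
        if (__copy_from_user_nocache(kbuf, ubuf, len))
            return -EFAULT;     /* nonzero means bytes were left uncopied */
        return 0;
    }
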
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index de2ccc149e3..fc1c8ddae14 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -322,10 +322,11 @@
#define __NR_sync_file_range 314
#define __NR_tee 315
#define __NR_vmsplice 316
+#define __NR_move_pages 317
#ifdef __KERNEL__
-#define NR_syscalls 317
+#define NR_syscalls 318
/*
* user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 632f2eedf72..bb0eb727dcd 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -265,7 +265,7 @@
#define __NR_keyctl 1273
#define __NR_ioprio_set 1274
#define __NR_ioprio_get 1275
-/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */
+#define __NR_move_pages 1276
#define __NR_inotify_init 1277
#define __NR_inotify_add_watch 1278
#define __NR_inotify_rm_watch 1279
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index b4f48b2a6a5..9727ca9d9f2 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -130,8 +130,4 @@ extern volatile unsigned int num_spurious;
*/
extern irq_node_t *new_irq_node(void);
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68k/processor.h b/include/asm-m68k/processor.h
index 352799e71f0..8455f778b60 100644
--- a/include/asm-m68k/processor.h
+++ b/include/asm-m68k/processor.h
@@ -71,10 +71,10 @@ struct thread_struct {
};
#define INIT_THREAD { \
- ksp: sizeof(init_stack) + (unsigned long) init_stack, \
- sr: PS_S, \
- fs: __KERNEL_DS, \
- info: INIT_THREAD_INFO(init_task) \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+ .sr = PS_S, \
+ .fs = __KERNEL_DS, \
+ .info = INIT_THREAD_INFO(init_task), \
}
/*
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index 2ffd87b0a76..b761ef218ce 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -4,8 +4,9 @@
/*
* User space memory access functions
*/
+#include <linux/compiler.h>
#include <linux/errno.h>
-#include <linux/sched.h>
+#include <linux/types.h>
#include <asm/segment.h>
#define VERIFY_READ 0
@@ -32,858 +33,315 @@ struct exception_table_entry
unsigned long insn, fixup;
};
+extern int __put_user_bad(void);
+extern int __get_user_bad(void);
+
+#define __put_user_asm(res, x, ptr, bwl, reg, err) \
+asm volatile ("\n" \
+ "1: moves."#bwl" %2,%1\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: moveq.l %3,%0\n" \
+ " jra 2b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=m" (*(ptr)) \
+ : #reg (x), "i" (err))
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
-#define put_user(x, ptr) \
-({ \
- int __pu_err; \
- typeof(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
- switch (sizeof (*(ptr))) { \
- case 1: \
- __put_user_asm(__pu_err, __pu_val, ptr, b); \
- break; \
- case 2: \
- __put_user_asm(__pu_err, __pu_val, ptr, w); \
- break; \
- case 4: \
- __put_user_asm(__pu_err, __pu_val, ptr, l); \
- break; \
- case 8: \
- __pu_err = __constant_copy_to_user(ptr, &__pu_val, 8); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
- } \
- __pu_err; \
+#define __put_user(x, ptr) \
+({ \
+ typeof(*(ptr)) __pu_val = (x); \
+ int __pu_err = 0; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
+ break; \
+ case 4: \
+ __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+ break; \
+ case 8: \
+ { \
+ const void *__pu_ptr = (ptr); \
+ asm volatile ("\n" \
+ "1: moves.l %2,(%1)+\n" \
+ "2: moves.l %R2,(%1)\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: movel %3,%0\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .long 3b,10b\n" \
+ " .previous" \
+ : "+d" (__pu_err), "+a" (__pu_ptr) \
+ : "r" (__pu_val), "i" (-EFAULT) \
+ : "memory"); \
+ break; \
+ } \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
})
-#define __put_user(x, ptr) put_user(x, ptr)
+#define put_user(x, ptr) __put_user(x, ptr)
-extern int __put_user_bad(void);
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(err,x,ptr,bwl) \
-__asm__ __volatile__ \
- ("21:moves" #bwl " %2,%1\n" \
- "1:\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "2: movel %3,%0\n" \
- " jra 1b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 21b,2b\n" \
- " .long 1b,2b\n" \
- ".previous" \
- : "=d"(err) \
- : "m"(*(ptr)), "r"(x), "i"(-EFAULT), "0"(0))
-
-#define get_user(x, ptr) \
-({ \
- int __gu_err; \
- typeof(*(ptr)) __gu_val; \
- __chk_user_ptr(ptr); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \
- break; \
- case 2: \
- __get_user_asm(__gu_err, __gu_val, ptr, w, "=r"); \
- break; \
- case 4: \
- __get_user_asm(__gu_err, __gu_val, ptr, l, "=r"); \
- break; \
- case 8: \
- __gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \
- break; \
- default: \
- __gu_val = (typeof(*(ptr)))0; \
- __gu_err = __get_user_bad(); \
- break; \
- } \
- (x) = __gu_val; \
- __gu_err; \
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
+ type __gu_val; \
+ asm volatile ("\n" \
+ "1: moves."#bwl" %2,%1\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " sub."#bwl" %1,%1\n" \
+ " jra 2b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=&" #reg (__gu_val) \
+ : "m" (*(ptr)), "i" (err)); \
+ (x) = (typeof(*(ptr)))(long)__gu_val; \
})
-#define __get_user(x, ptr) get_user(x, ptr)
-extern int __get_user_bad(void);
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = 0; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
+ break; \
+ case 4: \
+ __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
+ break; \
+/* case 8: disabled because gcc-4.1 has a broken typeof \
+ { \
+ const void *__gu_ptr = (ptr); \
+ u64 __gu_val; \
+ asm volatile ("\n" \
+ "1: moves.l (%2)+,%1\n" \
+ "2: moves.l (%2),%R1\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " sub.l %1,%1\n" \
+ " sub.l %R1,%R1\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (__gu_err), "=&r" (__gu_val), \
+ "+a" (__gu_ptr) \
+ : "i" (-EFAULT) \
+ : "memory"); \
+ (x) = (typeof(*(ptr)))__gu_val; \
+ break; \
+ } */ \
+ default: \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+#define get_user(x, ptr) __get_user(x, ptr)
-#define __get_user_asm(err,x,ptr,bwl,reg) \
-__asm__ __volatile__ \
- ("1: moves" #bwl " %2,%1\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "3: movel %3,%0\n" \
- " sub" #bwl " %1,%1\n" \
- " jra 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=d"(err), reg(x) \
- : "m"(*(ptr)), "i" (-EFAULT), "0"(0))
+unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
+unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long tmp;
- __asm__ __volatile__
- (" tstl %2\n"
- " jeq 2f\n"
- "1: movesl (%1)+,%3\n"
- " movel %3,(%0)+\n"
- " subql #1,%2\n"
- " jne 1b\n"
- "2: movel %4,%2\n"
- " bclr #1,%2\n"
- " jeq 4f\n"
- "3: movesw (%1)+,%3\n"
- " movew %3,(%0)+\n"
- "4: bclr #0,%2\n"
- " jeq 6f\n"
- "5: movesb (%1)+,%3\n"
- " moveb %3,(%0)+\n"
- "6:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "7: movel %2,%%d0\n"
- "71:clrl (%0)+\n"
- " subql #1,%%d0\n"
- " jne 71b\n"
- " lsll #2,%2\n"
- " addl %4,%2\n"
- " btst #1,%4\n"
- " jne 81f\n"
- " btst #0,%4\n"
- " jne 91f\n"
- " jra 6b\n"
- "8: addql #2,%2\n"
- "81:clrw (%0)+\n"
- " btst #0,%4\n"
- " jne 91f\n"
- " jra 6b\n"
- "9: addql #1,%2\n"
- "91:clrb (%0)+\n"
- " jra 6b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,7b\n"
- " .long 3b,8b\n"
- " .long 5b,9b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
- : "d"(n & 3), "0"(to), "1"(from), "2"(n/4)
- : "d0", "memory");
- return n;
-}
-
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+static __always_inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long tmp;
- __asm__ __volatile__
- (" tstl %2\n"
- " jeq 3f\n"
- "1: movel (%1)+,%3\n"
- "22:movesl %3,(%0)+\n"
- "2: subql #1,%2\n"
- " jne 1b\n"
- "3: movel %4,%2\n"
- " bclr #1,%2\n"
- " jeq 4f\n"
- " movew (%1)+,%3\n"
- "24:movesw %3,(%0)+\n"
- "4: bclr #0,%2\n"
- " jeq 5f\n"
- " moveb (%1)+,%3\n"
- "25:movesb %3,(%0)+\n"
- "5:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "60:addql #1,%2\n"
- "6: lsll #2,%2\n"
- " addl %4,%2\n"
- " jra 5b\n"
- "7: addql #2,%2\n"
- " jra 5b\n"
- "8: addql #1,%2\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,60b\n"
- " .long 22b,6b\n"
- " .long 2b,6b\n"
- " .long 24b,7b\n"
- " .long 3b,60b\n"
- " .long 4b,7b\n"
- " .long 25b,8b\n"
- " .long 5b,8b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
- : "r"(n & 3), "0"(to), "1"(from), "2"(n / 4)
- : "memory");
- return n;
-}
+ unsigned long res = 0, tmp;
-#define __copy_from_user_big(to, from, n, fixup, copy) \
- __asm__ __volatile__ \
- ("10: movesl (%1)+,%%d0\n" \
- " movel %%d0,(%0)+\n" \
- " subql #1,%2\n" \
- " jne 10b\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "11: movel %2,%%d0\n" \
- "13: clrl (%0)+\n" \
- " subql #1,%%d0\n" \
- " jne 13b\n" \
- " lsll #2,%2\n" \
- fixup "\n" \
- " jra 12f\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 10b,11b\n" \
- ".previous\n" \
- copy "\n" \
- "12:" \
- : "=a"(to), "=a"(from), "=d"(n) \
- : "0"(to), "1"(from), "2"(n/4) \
- : "d0", "memory")
+ /* limit the inlined version to 3 moves */
+ if (n == 11 || n > 12)
+ return __generic_copy_from_user(to, from, n);
-static inline unsigned long
-__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- switch (n) {
- case 0:
- break;
- case 1:
- __asm__ __volatile__
- ("1: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #1,%2\n"
- " clrb (%0)+\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("1: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #2,%2\n"
- " clrw (%0)+\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 3:
- __asm__ __volatile__
- ("1: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- "2: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- "3:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: addql #2,%2\n"
- " clrw (%0)+\n"
- "5: addql #1,%2\n"
- " clrb (%0)+\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,4b\n"
- " .long 2b,5b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 8:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "3:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: addql #4,%2\n"
- " clrl (%0)+\n"
- "5: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,4b\n"
- " .long 2b,5b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 12:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "3: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "4:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "5: addql #4,%2\n"
- " clrl (%0)+\n"
- "6: addql #4,%2\n"
- " clrl (%0)+\n"
- "7: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 4b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,5b\n"
- " .long 2b,6b\n"
- " .long 3b,7b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 16:
- __asm__ __volatile__
- ("1: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "2: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "3: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "4: movesl (%1)+,%%d0\n"
- " movel %%d0,(%0)+\n"
- "5:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "6: addql #4,%2\n"
- " clrl (%0)+\n"
- "7: addql #4,%2\n"
- " clrl (%0)+\n"
- "8: addql #4,%2\n"
- " clrl (%0)+\n"
- "9: addql #4,%2\n"
- " clrl (%0)+\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,6b\n"
- " .long 2b,7b\n"
- " .long 3b,8b\n"
- " .long 4b,9b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- default:
- switch (n & 3) {
- case 0:
- __copy_from_user_big(to, from, n, "", "");
- break;
+ switch (n) {
case 1:
- __copy_from_user_big(to, from, n,
- /* fixup */
- "1: addql #1,%2\n"
- " clrb (%0)+",
- /* copy */
- "2: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- ".section __ex_table,\"a\"\n"
- " .long 2b,1b\n"
- ".previous");
- break;
+ __get_user_asm(res, *(u8 *)to, (u8 *)from, u8, b, d, 1);
+ return res;
case 2:
- __copy_from_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2\n"
- " clrw (%0)+",
- /* copy */
- "2: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- ".section __ex_table,\"a\"\n"
- " .long 2b,1b\n"
- ".previous");
- break;
- case 3:
- __copy_from_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2\n"
- " clrw (%0)+\n"
- "2: addql #1,%2\n"
- " clrb (%0)+",
- /* copy */
- "3: movesw (%1)+,%%d0\n"
- " movew %%d0,(%0)+\n"
- "4: movesb (%1)+,%%d0\n"
- " moveb %%d0,(%0)+\n"
- ".section __ex_table,\"a\"\n"
- " .long 3b,1b\n"
- " .long 4b,2b\n"
- ".previous");
- break;
+ __get_user_asm(res, *(u16 *)to, (u16 *)from, u16, w, d, 2);
+ return res;
+ case 4:
+ __get_user_asm(res, *(u32 *)to, (u32 *)from, u32, l, r, 4);
+ return res;
}
- break;
- }
- return n;
-}
-#define __copy_to_user_big(to, from, n, fixup, copy) \
- __asm__ __volatile__ \
- ("10: movel (%1)+,%%d0\n" \
- "31: movesl %%d0,(%0)+\n" \
- "11: subql #1,%2\n" \
- " jne 10b\n" \
- "41:\n" \
- ".section .fixup,\"ax\"\n" \
- " .even\n" \
- "22: addql #1,%2\n" \
- "12: lsll #2,%2\n" \
- fixup "\n" \
- " jra 13f\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 10b,22b\n" \
- " .long 31b,12b\n" \
- " .long 11b,12b\n" \
- " .long 41b,22b\n" \
- ".previous\n" \
- copy "\n" \
- "13:" \
- : "=a"(to), "=a"(from), "=d"(n) \
- : "0"(to), "1"(from), "2"(n/4) \
- : "d0", "memory")
+ asm volatile ("\n"
+ " .ifndef .Lfrom_user\n"
+ " .set .Lfrom_user,1\n"
+ " .macro copy_from_user to,from,tmp\n"
+ " .if .Lcnt >= 4\n"
+ "1: moves.l (\\from)+,\\tmp\n"
+ " move.l \\tmp,(\\to)+\n"
+ " .set .Lcnt,.Lcnt-4\n"
+ " .elseif .Lcnt & 2\n"
+ "1: moves.w (\\from)+,\\tmp\n"
+ " move.w \\tmp,(\\to)+\n"
+ " .set .Lcnt,.Lcnt-2\n"
+ " .elseif .Lcnt & 1\n"
+ "1: moves.b (\\from)+,\\tmp\n"
+ " move.b \\tmp,(\\to)+\n"
+ " .set .Lcnt,.Lcnt-1\n"
+ " .else\n"
+ " .exitm\n"
+ " .endif\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3f\n"
+ " .previous\n"
+ " .endm\n"
+ " .endif\n"
+ "\n"
+ " .set .Lcnt,%c4\n"
+ " copy_from_user %1,%2,%3\n"
+ " copy_from_user %1,%2,%3\n"
+ " copy_from_user %1,%2,%3\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "3: moveq.l %4,%0\n"
+ " move.l %5,%1\n"
+ " .rept %c4 / 4\n"
+ " clr.l (%1)+\n"
+ " .endr\n"
+ " .if %c4 & 2\n"
+ " clr.w (%1)+\n"
+ " .endif\n"
+ " .if %c4 & 1\n"
+ " clr.b (%1)+\n"
+ " .endif\n"
+ " jra 2b\n"
+ " .previous\n"
+ : "+r" (res), "+a" (to), "+a" (from), "=&d" (tmp)
+ : "i" (n), "g" (to)
+ : "memory");
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+ return res;
+}
-static inline unsigned long
+static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- switch (n) {
- case 0:
- break;
- case 1:
- __asm__ __volatile__
- (" moveb (%1)+,%%d0\n"
- "21:movesb %%d0,(%0)+\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "2: addql #1,%2\n"
- " jra 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n "
- " .long 21b,2b\n"
- " .long 1b,2b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 2:
- __asm__ __volatile__
- (" movew (%1)+,%%d0\n"
- "21:movesw %%d0,(%0)+\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "2: addql #2,%2\n"
- " jra 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,2b\n"
- " .long 1b,2b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 3:
- __asm__ __volatile__
- (" movew (%1)+,%%d0\n"
- "21:movesw %%d0,(%0)+\n"
- "1: moveb (%1)+,%%d0\n"
- "22:movesb %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #2,%2\n"
- "4: addql #1,%2\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,3b\n"
- " .long 1b,3b\n"
- " .long 22b,4b\n"
- " .long 2b,4b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 4:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "2: addql #4,%2\n"
- " jra 1b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,2b\n"
- " .long 1b,2b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 8:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1: movel (%1)+,%%d0\n"
- "22:movesl %%d0,(%0)+\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "3: addql #4,%2\n"
- "4: addql #4,%2\n"
- " jra 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,3b\n"
- " .long 1b,3b\n"
- " .long 22b,4b\n"
- " .long 2b,4b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 12:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1: movel (%1)+,%%d0\n"
- "22:movesl %%d0,(%0)+\n"
- "2: movel (%1)+,%%d0\n"
- "23:movesl %%d0,(%0)+\n"
- "3:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: addql #4,%2\n"
- "5: addql #4,%2\n"
- "6: addql #4,%2\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,4b\n"
- " .long 1b,4b\n"
- " .long 22b,5b\n"
- " .long 2b,5b\n"
- " .long 23b,6b\n"
- " .long 3b,6b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- case 16:
- __asm__ __volatile__
- (" movel (%1)+,%%d0\n"
- "21:movesl %%d0,(%0)+\n"
- "1: movel (%1)+,%%d0\n"
- "22:movesl %%d0,(%0)+\n"
- "2: movel (%1)+,%%d0\n"
- "23:movesl %%d0,(%0)+\n"
- "3: movel (%1)+,%%d0\n"
- "24:movesl %%d0,(%0)+\n"
- "4:"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "5: addql #4,%2\n"
- "6: addql #4,%2\n"
- "7: addql #4,%2\n"
- "8: addql #4,%2\n"
- " jra 4b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 21b,5b\n"
- " .long 1b,5b\n"
- " .long 22b,6b\n"
- " .long 2b,6b\n"
- " .long 23b,7b\n"
- " .long 3b,7b\n"
- " .long 24b,8b\n"
- " .long 4b,8b\n"
- ".previous"
- : "=a"(to), "=a"(from), "=d"(n)
- : "0"(to), "1"(from), "2"(0)
- : "d0", "memory");
- break;
- default:
- switch (n & 3) {
- case 0:
- __copy_to_user_big(to, from, n, "", "");
- break;
+ unsigned long res = 0, tmp;
+
+ /* limit the inlined version to 3 moves */
+ if (n == 11 || n > 12)
+ return __generic_copy_to_user(to, from, n);
+
+ switch (n) {
case 1:
- __copy_to_user_big(to, from, n,
- /* fixup */
- "1: addql #1,%2",
- /* copy */
- " moveb (%1)+,%%d0\n"
- "22:movesb %%d0,(%0)+\n"
- "2:"
- ".section __ex_table,\"a\"\n"
- " .long 22b,1b\n"
- " .long 2b,1b\n"
- ".previous");
- break;
+ __put_user_asm(res, *(u8 *)from, (u8 *)to, b, d, 1);
+ return res;
case 2:
- __copy_to_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2",
- /* copy */
- " movew (%1)+,%%d0\n"
- "22:movesw %%d0,(%0)+\n"
- "2:"
- ".section __ex_table,\"a\"\n"
- " .long 22b,1b\n"
- " .long 2b,1b\n"
- ".previous");
- break;
- case 3:
- __copy_to_user_big(to, from, n,
- /* fixup */
- "1: addql #2,%2\n"
- "2: addql #1,%2",
- /* copy */
- " movew (%1)+,%%d0\n"
- "23:movesw %%d0,(%0)+\n"
- "3: moveb (%1)+,%%d0\n"
- "24:movesb %%d0,(%0)+\n"
- "4:"
- ".section __ex_table,\"a\"\n"
- " .long 23b,1b\n"
- " .long 3b,1b\n"
- " .long 24b,2b\n"
- " .long 4b,2b\n"
- ".previous");
- break;
+ __put_user_asm(res, *(u16 *)from, (u16 *)to, w, d, 2);
+ return res;
+ case 4:
+ __put_user_asm(res, *(u32 *)from, (u32 *)to, l, r, 4);
+ return res;
}
- break;
- }
- return n;
+
+ asm volatile ("\n"
+ " .ifndef .Lto_user\n"
+ " .set .Lto_user,1\n"
+ " .macro copy_to_user to,from,tmp\n"
+ " .if .Lcnt >= 4\n"
+ " move.l (\\from)+,\\tmp\n"
+ "11: moves.l \\tmp,(\\to)+\n"
+ "12: .set .Lcnt,.Lcnt-4\n"
+ " .elseif .Lcnt & 2\n"
+ " move.w (\\from)+,\\tmp\n"
+ "11: moves.w \\tmp,(\\to)+\n"
+ "12: .set .Lcnt,.Lcnt-2\n"
+ " .elseif .Lcnt & 1\n"
+ " move.b (\\from)+,\\tmp\n"
+ "11: moves.b \\tmp,(\\to)+\n"
+ "12: .set .Lcnt,.Lcnt-1\n"
+ " .else\n"
+ " .exitm\n"
+ " .endif\n"
+ "\n"
+ " .section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 11b,3f\n"
+ " .long 12b,3f\n"
+ " .previous\n"
+ " .endm\n"
+ " .endif\n"
+ "\n"
+ " .set .Lcnt,%c4\n"
+ " copy_to_user %1,%2,%3\n"
+ " copy_to_user %1,%2,%3\n"
+ " copy_to_user %1,%2,%3\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ " .even\n"
+ "3: moveq.l %4,%0\n"
+ " jra 2b\n"
+ " .previous\n"
+ : "+r" (res), "+a" (to), "+a" (from), "=&d" (tmp)
+ : "i" (n)
+ : "memory");
+
+ return res;
}
-#define copy_from_user(to, from, n) \
+#define __copy_from_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user(to, from, n) : \
__generic_copy_from_user(to, from, n))
-#define copy_to_user(to, from, n) \
+#define __copy_to_user(to, from, n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user(to, from, n) : \
__generic_copy_to_user(to, from, n))
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
-/*
- * Copy a null terminated string from userspace.
- */
+#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
+#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res;
- if (count == 0) return count;
- __asm__ __volatile__
- ("1: movesb (%2)+,%%d0\n"
- "12:moveb %%d0,(%1)+\n"
- " jeq 2f\n"
- " subql #1,%3\n"
- " jne 1b\n"
- "2: subl %3,%0\n"
- "3:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "4: movel %4,%0\n"
- " jra 3b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,4b\n"
- " .long 12b,4b\n"
- ".previous"
- : "=d"(res), "=a"(dst), "=a"(src), "=d"(count)
- : "i"(-EFAULT), "0"(count), "1"(dst), "2"(src), "3"(count)
- : "d0", "memory");
- return res;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-static inline long strnlen_user(const char __user *src, long n)
-{
- long res;
-
- res = -(unsigned long)src;
- __asm__ __volatile__
- ("1:\n"
- " tstl %2\n"
- " jeq 3f\n"
- "2: movesb (%1)+,%%d0\n"
- "22:\n"
- " subql #1,%2\n"
- " tstb %%d0\n"
- " jne 1b\n"
- " jra 4f\n"
- "3:\n"
- " addql #1,%0\n"
- "4:\n"
- " addl %1,%0\n"
- "5:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "6: moveq %3,%0\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 2b,6b\n"
- " .long 22b,6b\n"
- ".previous"
- : "=d"(res), "=a"(src), "=d"(n)
- : "i"(0), "0"(res), "1"(src), "2"(n)
- : "d0");
- return res;
-}
+long strncpy_from_user(char *dst, const char __user *src, long count);
+long strnlen_user(const char __user *src, long n);
+unsigned long clear_user(void __user *to, unsigned long n);
#define strlen_user(str) strnlen_user(str, 32767)
-/*
- * Zero Userspace
- */
-
-static inline unsigned long
-clear_user(void __user *to, unsigned long n)
-{
- __asm__ __volatile__
- (" tstl %1\n"
- " jeq 3f\n"
- "1: movesl %3,(%0)+\n"
- "2: subql #1,%1\n"
- " jne 1b\n"
- "3: movel %2,%1\n"
- " bclr #1,%1\n"
- " jeq 4f\n"
- "24:movesw %3,(%0)+\n"
- "4: bclr #0,%1\n"
- " jeq 5f\n"
- "25:movesb %3,(%0)+\n"
- "5:\n"
- ".section .fixup,\"ax\"\n"
- " .even\n"
- "61:addql #1,%1\n"
- "6: lsll #2,%1\n"
- " addl %2,%1\n"
- " jra 5b\n"
- "7: addql #2,%1\n"
- " jra 5b\n"
- "8: addql #1,%1\n"
- " jra 5b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,61b\n"
- " .long 2b,6b\n"
- " .long 3b,61b\n"
- " .long 24b,7b\n"
- " .long 4b,7b\n"
- " .long 25b,8b\n"
- " .long 5b,8b\n"
- ".previous"
- : "=a"(to), "=d"(n)
- : "r"(n & 3), "r"(0), "0"(to), "1"(n/4));
- return n;
-}
-
#endif /* _M68K_UACCESS_H */
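
How the new dispatch behaves (an illustration, not part of the patch): copy_from_user()/copy_to_user() now inline only small, compile-time-constant sizes (at most three moves) and otherwise call the out-of-line __generic_copy_*_user() helpers. example_read and runtime_len below are invented names:

    /* hypothetical caller showing both paths taken by the macros above */
    static long example_read(void *dst, const void __user *src, unsigned long runtime_len)
    {
        u32 val;

        /* sizeof(val) == 4 is a small constant: expands to the inline
         * __constant_copy_from_user() path (a single moves.l). */
        if (copy_from_user(&val, src, sizeof(val)))
            return -EFAULT;

        /* runtime_len is not a compile-time constant: this calls the
         * out-of-line __generic_copy_from_user(). */
        return copy_from_user(dst, src, runtime_len) ? -EFAULT : 0;
    }
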
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index f236fe92156..7c0b6296b45 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -410,46 +410,7 @@ __syscall_return(type,__res); \
#ifdef __KERNEL_SYSCALLS__
-#include <linux/compiler.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-/*
- * we need this inline - forking from kernel space will result
- * in NO COPY ON WRITE (!!!), until an execve is executed. This
- * is no problem, but for the stack. This is handled by not letting
- * main() use the stack at all after fork(). Thus, no function
- * calls - which means inline code for fork too, as otherwise we
- * would use the stack upon exit from 'fork()'.
- *
- * Actually only pause and fork are needed inline, so that there
- * won't be any messing with the stack from main(), but we define
- * some others too.
- */
-#define __NR__exit __NR_exit
-static inline _syscall0(pid_t,setsid)
-static inline _syscall3(int,write,int,fd,const char *,buf,off_t,count)
-static inline _syscall3(int,read,int,fd,char *,buf,off_t,count)
-static inline _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
-static inline _syscall1(int,dup,int,fd)
static inline _syscall3(int,execve,const char *,file,char **,argv,char **,envp)
-static inline _syscall3(int,open,const char *,file,int,flag,int,mode)
-static inline _syscall1(int,close,int,fd)
-static inline _syscall1(int,_exit,int,exitcode)
-static inline _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
-
-asmlinkage long sys_mmap2(
- unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-asmlinkage int sys_execve(char *name, char **argv, char **envp);
-asmlinkage int sys_pipe(unsigned long *fildes);
-struct pt_regs;
-struct sigaction;
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize);
#endif /* __KERNEL_SYSCALLS__ */
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index 2b408842a30..c5247516fcf 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -87,8 +87,4 @@ extern void (*mach_disable_irq)(unsigned int);
#define disable_irq(x) do { } while (0)
#define disable_irq_nosync(x) disable_irq(x)
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h
index f65bd90749e..1e19c457de7 100644
--- a/include/asm-m68knommu/ptrace.h
+++ b/include/asm-m68knommu/ptrace.h
@@ -70,7 +70,7 @@ struct switch_stack {
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
-#ifdef COFNIG_FPU
+#ifdef CONFIG_FPU
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
#endif
diff --git a/include/asm-mips/mach-au1x00/au1xxx_psc.h b/include/asm-mips/mach-au1x00/au1xxx_psc.h
index 5c3e2a38ce1..d7cbacdd21f 100644
--- a/include/asm-mips/mach-au1x00/au1xxx_psc.h
+++ b/include/asm-mips/mach-au1x00/au1xxx_psc.h
@@ -39,7 +39,12 @@
#define PSC0_BASE_ADDR 0xb1a00000
#define PSC1_BASE_ADDR 0xb1b00000
#define PSC2_BASE_ADDR 0xb0a00000
-#define PSC3_BASE_ADDR 0xb0d00000
+#define PSC3_BASE_ADDR 0xb0b00000
+#endif
+
+#ifdef CONFIG_SOC_AU1200
+#define PSC0_BASE_ADDR 0xb1a00000
+#define PSC1_BASE_ADDR 0xb1b00000
#endif
/* The PSC select and control registers are common to
@@ -227,6 +232,8 @@ typedef struct psc_i2s {
#define PSC_I2SCFG_DD_DISABLE (1 << 27)
#define PSC_I2SCFG_DE_ENABLE (1 << 26)
#define PSC_I2SCFG_SET_WS(x) (((((x) / 2) - 1) & 0x7f) << 16)
+#define PSC_I2SCFG_WS(n) ((n & 0xFF) << 16)
+#define PSC_I2SCFG_WS_MASK (PSC_I2SCFG_WS(0x3F))
#define PSC_I2SCFG_WI (1 << 15)
#define PSC_I2SCFG_DIV_MASK (3 << 13)
diff --git a/include/asm-mips/mach-db1x00/db1x00.h b/include/asm-mips/mach-db1x00/db1x00.h
index 8fbb4b42a8b..0f5f4c29f4e 100644
--- a/include/asm-mips/mach-db1x00/db1x00.h
+++ b/include/asm-mips/mach-db1x00/db1x00.h
@@ -30,8 +30,20 @@
#ifdef CONFIG_MIPS_DB1550
+
+#define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX
+#define DBDMA_AC97_RX_CHAN DSCR_CMD0_PSC1_RX
+#define DBDMA_I2S_TX_CHAN DSCR_CMD0_PSC3_TX
+#define DBDMA_I2S_RX_CHAN DSCR_CMD0_PSC3_RX
+
+#define SPI_PSC_BASE PSC0_BASE_ADDR
+#define AC97_PSC_BASE PSC1_BASE_ADDR
+#define SMBUS_PSC_BASE PSC2_BASE_ADDR
+#define I2S_PSC_BASE PSC3_BASE_ADDR
+
#define BCSR_KSEG1_ADDR 0xAF000000
#define NAND_PHYS_ADDR 0x20000000
+
#else
#define BCSR_KSEG1_ADDR 0xAE000000
#endif
diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h
index dc231c89bef..f53ec54c92f 100644
--- a/include/asm-mips/mmzone.h
+++ b/include/asm-mips/mmzone.h
@@ -10,7 +10,6 @@
#ifdef CONFIG_DISCONTIGMEM
-#define kvaddr_to_nid(kvaddr) pa_to_nid(__pa(kvaddr))
#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
#endif /* CONFIG_DISCONTIGMEM */
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h
index ceb9b73199d..c87813662d4 100644
--- a/include/asm-parisc/mmzone.h
+++ b/include/asm-parisc/mmzone.h
@@ -14,11 +14,6 @@ extern struct node_map_data node_data[];
#define NODE_DATA(nid) (&node_data[nid].pg_data)
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) \
({ \
diff --git a/include/asm-s390/irq.h b/include/asm-s390/irq.h
index 916a1aa0b07..bd1a721f7aa 100644
--- a/include/asm-s390/irq.h
+++ b/include/asm-s390/irq.h
@@ -21,10 +21,6 @@ enum interruption_class {
#define touch_nmi_watchdog() do { } while(0)
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h
index f2d64537e29..3141ddfea97 100644
--- a/include/asm-sparc/irq.h
+++ b/include/asm-sparc/irq.h
@@ -181,8 +181,4 @@ extern struct sun4m_intregs *sun4m_interrupts;
#define SUN4M_INT_SBUS(x) (1 << (x+7))
#define SUN4M_INT_VME(x) (1 << (x))
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
index 44431152b36..1bf096db8f4 100644
--- a/include/asm-v850/irq.h
+++ b/include/asm-v850/irq.h
@@ -62,8 +62,6 @@ extern void disable_irq (unsigned int irq);
/* Disable an irq without waiting. */
extern void disable_irq_nosync (unsigned int irq);
-extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
#endif /* !__ASSEMBLY__ */
#endif /* __V850_IRQ_H__ */
diff --git a/include/asm-x86_64/acpi.h b/include/asm-x86_64/acpi.h
index aa1c7b2e438..2c95a319c05 100644
--- a/include/asm-x86_64/acpi.h
+++ b/include/asm-x86_64/acpi.h
@@ -162,6 +162,8 @@ extern int acpi_pci_disabled;
extern u8 x86_acpiid_to_apicid[];
+#define ARCH_HAS_POWER_INIT 1
+
extern int acpi_skip_timer_override;
#endif /*__KERNEL__*/
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index 5a48e9bcf21..1dd40067c67 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -137,8 +137,6 @@
*/
#define u32 unsigned int
-#define lapic ((volatile struct local_apic *)APIC_BASE)
-
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 70bb9969766..c38ebdf6f42 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -42,7 +42,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
#ifdef CONFIG_DISCONTIGMEM
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
-#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
extern int pfn_valid(unsigned long pfn);
#endif
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index 1cc92fe0250..933ff11ece1 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -8,7 +8,6 @@ struct bootnode {
};
extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
-extern int pxm_to_node(int nid);
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 0aff22bdbb2..94387c915e5 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -617,10 +617,12 @@ __SYSCALL(__NR_tee, sys_tee)
__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
#define __NR_vmsplice 278
__SYSCALL(__NR_vmsplice, sys_vmsplice)
+#define __NR_move_pages 279
+__SYSCALL(__NR_move_pages, sys_move_pages)
#ifdef __KERNEL__
-#define __NR_syscall_max __NR_vmsplice
+#define __NR_syscall_max __NR_move_pages
#ifndef __NO_STUBS
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index bdc00ae9be4..03114f8d1e1 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -43,8 +43,7 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
- * If you use these functions directly please don't forget the
- * verify_area().
+ * If you use these functions directly please don't forget the access_ok().
*/
static inline
unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index 3c02b0e033f..abcd86dc5ab 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -172,4 +172,9 @@ static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
-#endif /* _XTENSA_RWSEM_XADD_H */
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+#endif /* _XTENSA_RWSEM_H */
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
index 06a22b83ba1..88a64e1144d 100644
--- a/include/asm-xtensa/uaccess.h
+++ b/include/asm-xtensa/uaccess.h
@@ -154,35 +154,6 @@
.Laccess_ok_\@:
.endm
-/*
- * verify_area determines whether a memory access is allowed. It's
- * mostly an unnecessary wrapper for access_ok, but we provide it as a
- * duplicate of the verify_area() C inline function below. See the
- * equivalent C version below for clarity.
- *
- * On error, verify_area branches to a label indicated by parameter
- * <error>. This implies that the macro falls through to the next
- * instruction on success.
- *
- * Note that we assume success is the common case, and we optimize the
- * branch fall-through case on success.
- *
- * On Entry:
- * <aa> register containing memory address
- * <as> register containing memory size
- * <at> temp register
- * <error> label to branch to on error; implies fall-through
- * macro on success
- * On Exit:
- * <aa> preserved
- * <as> preserved
- * <at> destroyed
- */
- .macro verify_area aa, as, at, sp, error
- access_ok \at, \aa, \as, \sp, \error
- .endm
-
-
#else /* __ASSEMBLY__ not defined */
#include <linux/sched.h>
@@ -211,11 +182,6 @@
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
-static inline int verify_area(int type, const void * addr, unsigned long size)
-{
- return access_ok(type,addr,size) ? 0 : -EFAULT;
-}
-
/*
* These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 1cf0b91d05b..90d6df1551e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -37,6 +37,7 @@
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_numa.h>
#include <asm/acpi.h>
@@ -407,10 +408,18 @@ void acpi_table_print_madt_entry (acpi_table_entry_header *madt);
void acpi_table_print_srat_entry (acpi_table_entry_header *srat);
/* the following four functions are architecture-dependent */
+#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
+#define NR_NODE_MEMBLKS MAX_NUMNODES
+#define acpi_numa_slit_init(slit) do {} while (0)
+#define acpi_numa_processor_affinity_init(pa) do {} while (0)
+#define acpi_numa_memory_affinity_init(ma) do {} while (0)
+#define acpi_numa_arch_fixup() do {} while (0)
+#else
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa);
void acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma);
void acpi_numa_arch_fixup(void);
+#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index da2d107fe2c..22866fa2d96 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -91,8 +91,8 @@ static inline void *alloc_remap(int nid, unsigned long size)
}
#endif
-extern unsigned long __initdata nr_kernel_pages;
-extern unsigned long __initdata nr_all_pages;
+extern unsigned long nr_kernel_pages;
+extern unsigned long nr_all_pages;
extern void *__init alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 1417de93505..dbb7769009b 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -3,31 +3,22 @@
*
* Copyright (C) Matt Helsley, IBM Corp. 2005
* Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin
- * Original copyright notice follows:
* Copyright (C) 2005 Nguyen Anh Quynh <aquynh@gmail.com>
* Copyright (C) 2005 Guillaume Thouvenin <guillaume.thouvenin@bull.net>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef CN_PROC_H
#define CN_PROC_H
#include <linux/types.h>
-#include <linux/time.h>
-#include <linux/connector.h>
/*
* Userspace sends this enum to register with the kernel that it is listening
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h
index b3ecf8f71d9..7b5c5df5cb6 100644
--- a/include/linux/coda_linux.h
+++ b/include/linux/coda_linux.h
@@ -36,7 +36,7 @@ extern const struct file_operations coda_ioctl_operations;
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
-int coda_flush(struct file *f);
+int coda_flush(struct file *f, fl_owner_t id);
int coda_release(struct inode *i, struct file *f);
int coda_permission(struct inode *inode, int mask, struct nameidata *nd);
int coda_revalidate_inode(struct dentry *);
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index d539262a8f8..98f6c52c152 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -70,7 +70,7 @@ int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
unsigned int cmd, struct PioctlData *data);
int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
int venus_fsync(struct super_block *sb, struct CodaFid *fid);
-int venus_statfs(struct super_block *sb, struct kstatfs *sfs);
+int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
/* messages between coda filesystem in kernel and Venus */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5aa95011f7e..466fbe9e489 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -72,6 +72,8 @@ struct cpufreq_real_policy {
struct cpufreq_policy {
cpumask_t cpus; /* affected CPUs */
+ unsigned int shared_type; /* ANY or ALL affected CPUs
+ should set cpufreq */
unsigned int cpu; /* cpu nr of registered CPU */
struct cpufreq_cpuinfo cpuinfo;/* see above */
@@ -98,6 +100,8 @@ struct cpufreq_policy {
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
+#define CPUFREQ_SHARED_TYPE_ALL (0) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY (1) /* Freq can be set from any dependent CPU */
/******************** cpufreq transition notifiers *******************/
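
Brief illustration (not part of the patch): shared_type records how a multi-CPU frequency domain has to be programmed. A hypothetical driver initialization, with the value chosen only as an example:

    /* hypothetical: every CPU in policy->cpus shares one domain and each must
     * be written (_ALL); use _ANY when a single write covers the whole domain */
    policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
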
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 46d0e079735..0dd1610a94a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -217,7 +217,6 @@ extern struct dentry * d_alloc_anon(struct inode *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_anon(struct super_block *);
extern int d_invalidate(struct dentry *);
/* only used at mount-time */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 73c7d6f04b3..dba4cbd157e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -376,7 +376,8 @@ struct address_space_operations {
struct page* (*get_xip_page)(struct address_space *, sector_t,
int);
/* migrate the contents of a page to the specified target */
- int (*migratepage) (struct page *, struct page *);
+ int (*migratepage) (struct address_space *,
+ struct page *, struct page *);
};
struct backing_dev_info;
@@ -682,6 +683,7 @@ extern spinlock_t files_lock;
#define FL_FLOCK 2
#define FL_ACCESS 8 /* not trying to lock, just looking */
#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
/*
@@ -774,7 +776,6 @@ extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_l
extern int posix_lock_file(struct file *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
-extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
extern void lease_get_mtime(struct inode *, struct timespec *time);
@@ -1024,7 +1025,7 @@ struct file_operations {
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
- int (*flush) (struct file *);
+ int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, struct dentry *, int datasync);
int (*aio_fsync) (struct kiocb *, int datasync);
@@ -1096,7 +1097,7 @@ struct super_operations {
int (*sync_fs)(struct super_block *sb, int wait);
void (*write_super_lockfs) (struct super_block *);
void (*unlockfs) (struct super_block *);
- int (*statfs) (struct super_block *, struct kstatfs *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
@@ -1269,23 +1270,26 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
struct file_system_type {
const char *name;
int fs_flags;
- struct super_block *(*get_sb) (struct file_system_type *, int,
- const char *, void *);
+ int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
struct list_head fs_supers;
};
-struct super_block *get_sb_bdev(struct file_system_type *fs_type,
+extern int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-struct super_block *get_sb_single(struct file_system_type *fs_type,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_single(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-struct super_block *get_sb_nodev(struct file_system_type *fs_type,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
@@ -1296,8 +1300,10 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
void *data);
-struct super_block *get_sb_pseudo(struct file_system_type *, char *,
- struct super_operations *ops, unsigned long);
+extern int get_sb_pseudo(struct file_system_type *, char *,
+ struct super_operations *ops, unsigned long,
+ struct vfsmount *mnt);
+extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super(struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
void unnamed_dev_init(void);
@@ -1320,7 +1326,7 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
struct vfsmount *);
-extern int vfs_statfs(struct super_block *, struct kstatfs *);
+extern int vfs_statfs(struct dentry *, struct kstatfs *);
/* /sys/fs */
extern struct subsystem fs_subsys;
@@ -1741,7 +1747,7 @@ extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, void *, filldir_t);
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int simple_statfs(struct super_block *, struct kstatfs *);
+extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
@@ -1767,7 +1773,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct page *, struct page *);
+extern int buffer_migrate_page(struct address_space *,
+ struct page *, struct page *);
#else
#define buffer_migrate_page NULL
#endif
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 7fd0576a445..690c42803d2 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -4,37 +4,32 @@
* Uses for this includes on-device special memory, uncached memory
* etc.
*
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver, adapted for general purpose use.
- *
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-#include <linux/spinlock.h>
-#define ALLOC_MIN_SHIFT 5 /* 32 bytes minimum */
/*
- * Link between free memory chunks of a given size.
+ * General purpose special memory pool descriptor.
*/
-struct gen_pool_link {
- struct gen_pool_link *next;
+struct gen_pool {
+ rwlock_t lock;
+ struct list_head chunks; /* list of chunks in this pool */
+ int min_alloc_order; /* minimum allocation order */
};
/*
- * Memory pool descriptor.
+ * General purpose special memory pool chunk descriptor.
*/
-struct gen_pool {
+struct gen_pool_chunk {
spinlock_t lock;
- unsigned long (*get_new_chunk)(struct gen_pool *);
- struct gen_pool *next;
- struct gen_pool_link *h;
- unsigned long private;
- int max_chunk_shift;
+ struct list_head next_chunk; /* next chunk in pool */
+ unsigned long start_addr; /* starting address of memory chunk */
+ unsigned long end_addr; /* ending address of memory chunk */
+ unsigned long bits[0]; /* bitmap for allocating memory chunk */
};
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size);
-void gen_pool_free(struct gen_pool *mp, unsigned long ptr, int size);
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
- unsigned long (*fp)(struct gen_pool *),
- unsigned long data);
+extern struct gen_pool *gen_pool_create(int, int);
+extern int gen_pool_add(struct gen_pool *, unsigned long, size_t, int);
+extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 4c5e610fe44..c25a38d8f60 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,8 @@ int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
+int hugetlb_reserve_pages(struct inode *inode, long from, long to);
+void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -139,8 +141,6 @@ struct hugetlbfs_sb_info {
struct hugetlbfs_inode_info {
struct shared_policy policy;
- /* Protected by the (global) hugetlb_lock */
- unsigned long prereserved_hpages;
struct inode vfs_inode;
};
@@ -157,10 +157,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_zero_setup(size_t);
-int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atleast_hpages);
-void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atmost_hpages);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9e0fefd7884..70741e17011 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -7,32 +7,13 @@
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
-/*
- * For 2.4.x compatibility, 2.4.x can use
- *
- * typedef void irqreturn_t;
- * #define IRQ_NONE
- * #define IRQ_HANDLED
- * #define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
- *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
- */
-typedef int irqreturn_t;
-
-#define IRQ_NONE (0)
-#define IRQ_HANDLED (1)
-#define IRQ_RETVAL(x) ((x) != 0)
-
struct irqaction {
irqreturn_t (*handler)(int, void *, struct pt_regs *);
unsigned long flags;
diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h
index 3dd18b785eb..de73a3289cc 100644
--- a/include/linux/ioc4.h
+++ b/include/linux/ioc4.h
@@ -147,6 +147,10 @@ struct ioc4_misc_regs {
#define IOC4_GPCR_EDGE_6 0x40
#define IOC4_GPCR_EDGE_7 0x80
+#define IOC4_VARIANT_IO9 0x0900
+#define IOC4_VARIANT_PCI_RT 0x0901
+#define IOC4_VARIANT_IO10 0x1000
+
/* One of these per IOC4 */
struct ioc4_driver_data {
struct list_head idd_list;
@@ -156,6 +160,7 @@ struct ioc4_driver_data {
struct __iomem ioc4_misc_regs *idd_misc_regs;
unsigned long count_period;
void *idd_serial_data;
+ unsigned int idd_variant;
};
/* One per submodule */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 42c9cd56286..676e00dfb21 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -17,6 +17,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
@@ -164,10 +165,18 @@ static inline void set_irq_info(int irq, cpumask_t mask)
#endif // CONFIG_SMP
+#ifdef CONFIG_IRQBALANCE
+extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
+#else
+static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
+{
+}
+#endif
+
extern int no_irq_affinity;
extern int noirqdebug_setup(char *str);
-extern fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+extern fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
struct irqaction *action);
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
new file mode 100644
index 00000000000..881883c2009
--- /dev/null
+++ b/include/linux/irqreturn.h
@@ -0,0 +1,25 @@
+/* irqreturn.h */
+#ifndef _LINUX_IRQRETURN_H
+#define _LINUX_IRQRETURN_H
+
+/*
+ * For 2.4.x compatibility, 2.4.x can use
+ *
+ * typedef void irqreturn_t;
+ * #define IRQ_NONE
+ * #define IRQ_HANDLED
+ * #define IRQ_RETVAL(x)
+ *
+ * To mix old-style and new-style irq handler returns.
+ *
+ * IRQ_NONE means we didn't handle it.
+ * IRQ_HANDLED means that we did have a valid interrupt and handled it.
+ * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
+ */
+typedef int irqreturn_t;
+
+#define IRQ_NONE (0)
+#define IRQ_HANDLED (1)
+#define IRQ_RETVAL(x) ((x) != 0)
+
+#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 6a425e370cb..20eb34403d0 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -501,6 +501,12 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
+ * Doubly-linked circular list of all buffers submitted for IO while
+ * checkpointing. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_io_list;
+
+ /*
* Doubly-linked circular list of temporary buffers currently undergoing
* IO in the log [j_list_lock]
*/
@@ -849,7 +855,7 @@ extern void journal_commit_transaction(journal_t *);
/* Checkpoint list management */
int __journal_clean_checkpoint_list(journal_t *journal);
-void __journal_remove_checkpoint(struct journal_head *);
+int __journal_remove_checkpoint(struct journal_head *);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4fc576ed4c..8c21aaa248b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -24,6 +24,9 @@ extern const char linux_banner[];
#define LONG_MAX ((long)(~0UL>>1))
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
+#define LLONG_MAX ((long long)(~0ULL>>1))
+#define LLONG_MIN (-LLONG_MAX - 1)
+#define ULLONG_MAX (~0ULL)
#define STACK_MAGIC 0xdeadbeef
@@ -75,7 +78,7 @@ extern int cond_resched(void);
# define might_sleep() do { might_resched(); } while (0)
#endif
-#define might_sleep_if(cond) do { if (unlikely(cond)) might_sleep(); } while (0)
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
#define abs(x) ({ \
int __x = (x); \
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index cfb3410e32b..6427949ddf9 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -106,6 +106,7 @@ extern struct page *kimage_alloc_control_pages(struct kimage *image,
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
extern struct kimage *kexec_image;
+extern struct kimage *kexec_crash_image;
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_ARCH_MASK 0xffff0000
diff --git a/include/linux/list.h b/include/linux/list.h
index 76f05718342..a02642e4710 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -197,12 +197,35 @@ static inline void list_del_rcu(struct list_head *entry)
entry->prev = LIST_POISON2;
}
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ * Note: if 'old' was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
/*
* list_replace_rcu - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* The old entry will be replaced with the new entry atomically.
+ * Note: 'old' should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
struct list_head *new)
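/*
 * Minimal usage sketch for the new list_replace_init() helper, assuming a
 * kernel-module context; work_list and drain_work() are hypothetical.
 */
#include <linux/list.h>

static LIST_HEAD(work_list);

static void drain_work(void)
{
	struct list_head local;

	/* take over the current entries; work_list is re-initialised empty */
	list_replace_init(&work_list, &local);

	/* entries can now be walked via 'local' without racing against
	 * new additions to work_list */
}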
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6789c4940c9..5dba23a1c0d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -3,17 +3,17 @@
#include <linux/mm.h>
+typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+
#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
-extern void migrate_page_copy(struct page *, struct page *);
-extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed);
-extern int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest);
-extern int fail_migrate_page(struct page *, struct page *);
+extern int migrate_page(struct address_space *,
+ struct page *, struct page *);
+extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+
+extern int fail_migrate_page(struct address_space *,
+ struct page *, struct page *);
extern int migrate_prep(void);
@@ -22,8 +22,8 @@ extern int migrate_prep(void);
static inline int isolate_lru_page(struct page *p, struct list_head *list)
{ return -ENOSYS; }
static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
- struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+static inline int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private) { return -ENOSYS; }
static inline int migrate_pages_to(struct list_head *pagelist,
struct vm_area_struct *vma, int dest) { return 0; }
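/*
 * Sketch of an allocation callback matching the reworked migrate_pages()
 * interface (new_page_t).  Carrying a target node in 'private' mirrors
 * common usage, but the names here are hypothetical.
 */
#include <linux/migrate.h>
#include <linux/gfp.h>

static struct page *new_node_page(struct page *page, unsigned long node,
				  int **result)
{
	/* allocate the replacement page on the node passed via 'private' */
	return alloc_pages_node((int)node, GFP_HIGHUSER, 0);
}

/* a caller would then do: migrate_pages(&pagelist, new_node_page, nid); */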
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2fa375e478..3b09444121d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -199,6 +199,10 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+
+ /* notification that a previously read-only page is about to become
+ * writable; if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_NUMA
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
@@ -465,10 +469,13 @@ static inline unsigned long page_zonenum(struct page *page)
struct zone;
extern struct zone *zone_table[];
+static inline int page_zone_id(struct page *page)
+{
+ return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
+}
static inline struct zone *page_zone(struct page *page)
{
- return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
- ZONETABLE_MASK];
+ return zone_table[page_zone_id(page)];
}
static inline unsigned long page_to_nid(struct page *page)
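/*
 * Hypothetical ->page_mkwrite() handler showing the new callback's
 * contract: a negative return value makes the write fault deliver SIGBUS.
 * The filesystem and its reservation helper are invented for the example.
 */
#include <linux/mm.h>
#include <linux/errno.h>

static int exfs_reserve_blocks(struct page *page)
{
	return 1;	/* placeholder: pretend the reservation succeeded */
}

static int exfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	if (!exfs_reserve_blocks(page))
		return -ENOSPC;	/* faulting task gets SIGBUS */
	return 0;
}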
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9742e3c1622..d6120fa6911 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -197,7 +197,7 @@ struct zone {
/*
* wait_table -- the array holding the hash table
- * wait_table_size -- the size of the hash table array
+ * wait_table_hash_nr_entries -- the size of the hash table array
* wait_table_bits -- wait_table_size == (1 << wait_table_bits)
*
* The purpose of all these is to keep track of the people
@@ -220,7 +220,7 @@ struct zone {
* free_area_init_core() performs the initialization of them.
*/
wait_queue_head_t * wait_table;
- unsigned long wait_table_size;
+ unsigned long wait_table_hash_nr_entries;
unsigned long wait_table_bits;
/*
@@ -333,6 +333,9 @@ void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size);
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
@@ -506,6 +509,10 @@ struct mem_section {
* pages. However, it is stored with some other magic.
* (see sparse.c::sparse_init_one_section())
*
+ * Additionally, during early boot we encode the node id of the
+ * section's location here to guide allocation.
+ * (see sparse.c::memory_present())
+ *
* Making it a UL at least makes someone do a cast
* before using it wrong.
*/
@@ -545,6 +552,7 @@ extern int __section_nr(struct mem_section* ms);
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_MAP_LAST_BIT (1UL<<2)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 2
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
diff --git a/include/linux/module.h b/include/linux/module.h
index c2d89e037af..2d366098eab 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -105,6 +105,8 @@ extern struct module __this_module;
* "GPL and additional rights" [GNU Public License v2 rights and more]
* "Dual BSD/GPL" [GNU Public License v2
* or BSD license choice]
+ * "Dual MIT/GPL" [GNU Public License v2
+ * or MIT license choice]
* "Dual MPL/GPL" [GNU Public License v2
* or Mozilla license choice]
*
diff --git a/include/linux/mount.h b/include/linux/mount.h
index b7472ae91fa..60718f12caa 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -17,6 +17,11 @@
#include <linux/spinlock.h>
#include <asm/atomic.h>
+struct super_block;
+struct vfsmount;
+struct dentry;
+struct namespace;
+
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
#define MNT_NOEXEC 0x04
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d276a4e2f82..0c076d58c67 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -7,6 +7,8 @@
#include <linux/percpu.h>
#include <linux/cache.h>
+#include <linux/types.h>
+
#include <asm/pgtable.h>
/*
@@ -88,7 +90,17 @@
#define PG_nosave_free 18 /* Free, should not be written */
#define PG_buddy 19 /* Page is free, on buddy lists */
-#define PG_uncached 20 /* Page has been mapped as uncached */
+
+#if (BITS_PER_LONG > 32)
+/*
+ * 64-bit-only flags build down from bit 31
+ *
+ * 32 bit  -------------------------------| FIELDS |       FLAGS         |
+ * 64 bit  |           FIELDS             | ??????         FLAGS         |
+ *         63                            32                              0
+ */
+#define PG_uncached 31 /* Page has been mapped as uncached */
+#endif
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7a1af574ded..1245df7141a 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -99,6 +99,13 @@ extern struct page * read_cache_page(struct address_space *mapping,
extern int read_cache_pages(struct address_space *mapping,
struct list_head *pages, filler_t *filler, void *data);
+static inline struct page *read_mapping_page(struct address_space *mapping,
+ unsigned long index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
+}
+
int add_to_page_cache(struct page *page, struct address_space *mapping,
unsigned long index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
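/*
 * Sketch of a caller of the new read_mapping_page() helper, following the
 * usual read_cache_page() pattern of waiting for the read and checking
 * PageUptodate.  The wrapper function itself is hypothetical.
 */
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>

static struct page *grab_first_page(struct address_space *mapping)
{
	struct page *page = read_mapping_page(mapping, 0, NULL);

	if (IS_ERR(page))
		return page;
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);
		return ERR_PTR(-EIO);
	}
	return page;
}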
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 66b5de404f2..f5aa593ccf3 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -10,13 +10,14 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
+#include <linux/types.h>
#ifdef CONFIG_SMP
struct percpu_counter {
spinlock_t lock;
- long count;
- long *counters;
+ s64 count;
+ s32 *counters;
};
#if NR_CPUS >= 16
@@ -25,11 +26,11 @@ struct percpu_counter {
#define FBC_BATCH (NR_CPUS*4)
#endif
-static inline void percpu_counter_init(struct percpu_counter *fbc)
+static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
spin_lock_init(&fbc->lock);
- fbc->count = 0;
- fbc->counters = alloc_percpu(long);
+ fbc->count = amount;
+ fbc->counters = alloc_percpu(s32);
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -37,10 +38,10 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
free_percpu(fbc->counters);
}
-void percpu_counter_mod(struct percpu_counter *fbc, long amount);
-long percpu_counter_sum(struct percpu_counter *fbc);
+void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
+s64 percpu_counter_sum(struct percpu_counter *fbc);
-static inline long percpu_counter_read(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}
@@ -48,13 +49,14 @@ static inline long percpu_counter_read(struct percpu_counter *fbc)
/*
* It is possible for the percpu_counter_read() to return a small negative
* number for some counter which should never be negative.
+ *
*/
-static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
- long ret = fbc->count;
+ s64 ret = fbc->count;
barrier(); /* Prevent reloads of fbc->count */
- if (ret > 0)
+ if (ret >= 0)
return ret;
return 1;
}
@@ -62,12 +64,12 @@ static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
#else
struct percpu_counter {
- long count;
+ s64 count;
};
-static inline void percpu_counter_init(struct percpu_counter *fbc)
+static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
- fbc->count = 0;
+ fbc->count = amount;
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -75,24 +77,24 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
}
static inline void
-percpu_counter_mod(struct percpu_counter *fbc, long amount)
+percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
preempt_disable();
fbc->count += amount;
preempt_enable();
}
-static inline long percpu_counter_read(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}
-static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
return fbc->count;
}
-static inline long percpu_counter_sum(struct percpu_counter *fbc)
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
return percpu_counter_read_positive(fbc);
}
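/*
 * Usage sketch for the reworked percpu_counter API: 64-bit counts and an
 * initial value passed to percpu_counter_init().  The counter name and
 * the functions around it are made up for illustration.
 */
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static void widgets_setup(void)
{
	percpu_counter_init(&nr_widgets, 0);	/* start at zero */
}

static void widget_created(void)
{
	percpu_counter_mod(&nr_widgets, 1);	/* cheap, mostly per-cpu */
}

static s64 widgets_estimate(void)
{
	return percpu_counter_read_positive(&nr_widgets);
}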
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 0d36750fc0f..ee918bc6e18 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -51,6 +51,10 @@
#ifdef __KERNEL__
/*
* Ptrace flags
+ *
+ * The ownership rules for task->ptrace, which holds the ptrace
+ * flags, are simple: when a task is running it owns its task->ptrace
+ * flags; when a task is stopped the ptracer owns task->ptrace.
*/
#define PT_PTRACED 0x00000001
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index dd83cca2800..9158a68140c 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -23,6 +23,9 @@
#include <linux/preempt.h>
#include <linux/types.h>
+#define RADIX_TREE_MAX_TAGS 2
+
+/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
unsigned int height;
gfp_t gfp_mask;
@@ -45,8 +48,6 @@ do { \
(root)->rnode = NULL; \
} while (0)
-#define RADIX_TREE_MAX_TAGS 2
-
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 78ecfa28b1c..00b340ba661 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -2,8 +2,8 @@
#define _LINUX_RAMFS_H
struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev);
-struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data);
+extern int ramfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt);
#ifndef CONFIG_MMU
extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 970284f571a..6312758393b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -246,7 +246,7 @@ extern int rcu_needs_cpu(int cpu);
* softirq handlers will have completed, since in some kernels, these
* handlers can run in process context, and can block.
*
- * This primitive provides the guarantees made by the (deprecated)
+ * This primitive provides the guarantees made by the (now removed)
* synchronize_kernel() API. In contrast, synchronize_rcu() only
* guarantees that rcu_read_lock() sections will have completed.
* In "classic RCU", these two guarantees happen to be one and
@@ -264,7 +264,6 @@ extern void FASTCALL(call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head)));
-extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 2d4c81a220d..bf97b090001 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -91,7 +91,6 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked);
int try_to_unmap(struct page *, int ignore_refs);
-void remove_from_swap(struct page *page);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 267f1525704..a9d23c7d1b2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1225,7 +1225,7 @@ static inline int thread_group_empty(task_t *p)
(thread_group_leader(p) && !thread_group_empty(p))
/*
- * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset.
*
diff --git a/include/linux/security.h b/include/linux/security.h
index 47722d35553..d2c17bd91a2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -171,9 +171,9 @@ struct swap_info_struct;
* Deallocate and clear the sb->s_security field.
* @sb contains the super_block structure to be modified.
* @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @sb
- * filesystem.
- * @sb contains the super_block structure for the filesystem.
+ * Check permission before obtaining filesystem statistics for the @mnt
+ * mountpoint.
+ * @dentry is a handle on the superblock for the filesystem.
* Return 0 if permission is granted.
* @sb_mount:
* Check permission before an object specified by @dev_name is mounted on
@@ -577,6 +577,11 @@ struct swap_info_struct;
* @p contains the task_struct of process.
* @nice contains the new nice value.
* Return 0 if permission is granted.
+ * @task_setioprio:
+ * Check permission before setting the ioprio value of @p to @ioprio.
+ * @p contains the task_struct of process.
+ * @ioprio contains the new ioprio value.
+ * Return 0 if permission is granted.
* @task_setrlimit:
* Check permission before setting the resource limits of the current
* process for @resource to @new_rlim. The old resource limit values can
@@ -596,6 +601,10 @@ struct swap_info_struct;
* @p.
* @p contains the task_struct for process.
* Return 0 if permission is granted.
+ * @task_movememory:
+ * Check permission before moving memory owned by process @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
* @task_kill:
* Check permission before sending signal @sig to @p. @info can be NULL,
* the constant 1, or a pointer to a siginfo structure. If @info is 1 or
@@ -1127,7 +1136,7 @@ struct security_operations {
int (*sb_copy_data)(struct file_system_type *type,
void *orig, void *copy);
int (*sb_kern_mount) (struct super_block *sb, void *data);
- int (*sb_statfs) (struct super_block * sb);
+ int (*sb_statfs) (struct dentry *dentry);
int (*sb_mount) (char *dev_name, struct nameidata * nd,
char *type, unsigned long flags, void *data);
int (*sb_check_sb) (struct vfsmount * mnt, struct nameidata * nd);
@@ -1210,10 +1219,12 @@ struct security_operations {
int (*task_getsid) (struct task_struct * p);
int (*task_setgroups) (struct group_info *group_info);
int (*task_setnice) (struct task_struct * p, int nice);
+ int (*task_setioprio) (struct task_struct * p, int ioprio);
int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
int (*task_setscheduler) (struct task_struct * p, int policy,
struct sched_param * lp);
int (*task_getscheduler) (struct task_struct * p);
+ int (*task_movememory) (struct task_struct * p);
int (*task_kill) (struct task_struct * p,
struct siginfo * info, int sig);
int (*task_wait) (struct task_struct * p);
@@ -1450,9 +1461,9 @@ static inline int security_sb_kern_mount (struct super_block *sb, void *data)
return security_ops->sb_kern_mount (sb, data);
}
-static inline int security_sb_statfs (struct super_block *sb)
+static inline int security_sb_statfs (struct dentry *dentry)
{
- return security_ops->sb_statfs (sb);
+ return security_ops->sb_statfs (dentry);
}
static inline int security_sb_mount (char *dev_name, struct nameidata *nd,
@@ -1836,6 +1847,11 @@ static inline int security_task_setnice (struct task_struct *p, int nice)
return security_ops->task_setnice (p, nice);
}
+static inline int security_task_setioprio (struct task_struct *p, int ioprio)
+{
+ return security_ops->task_setioprio (p, ioprio);
+}
+
static inline int security_task_setrlimit (unsigned int resource,
struct rlimit *new_rlim)
{
@@ -1854,6 +1870,11 @@ static inline int security_task_getscheduler (struct task_struct *p)
return security_ops->task_getscheduler (p);
}
+static inline int security_task_movememory (struct task_struct *p)
+{
+ return security_ops->task_movememory (p);
+}
+
static inline int security_task_kill (struct task_struct *p,
struct siginfo *info, int sig)
{
@@ -2162,7 +2183,7 @@ static inline int security_sb_kern_mount (struct super_block *sb, void *data)
return 0;
}
-static inline int security_sb_statfs (struct super_block *sb)
+static inline int security_sb_statfs (struct dentry *dentry)
{
return 0;
}
@@ -2478,6 +2499,11 @@ static inline int security_task_setnice (struct task_struct *p, int nice)
return 0;
}
+static inline int security_task_setioprio (struct task_struct *p, int ioprio)
+{
+ return 0;
+}
+
static inline int security_task_setrlimit (unsigned int resource,
struct rlimit *new_rlim)
{
@@ -2496,6 +2522,11 @@ static inline int security_task_getscheduler (struct task_struct *p)
return 0;
}
+static inline int security_task_movememory (struct task_struct *p)
+{
+ return 0;
+}
+
static inline int security_task_kill (struct task_struct *p,
struct siginfo *info, int sig)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9dc93163e06..45ad55b70d1 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -86,6 +86,51 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#endif
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep.
+ * For example, use this inside interrupt handlers.
+ *
+ * %GFP_HIGHUSER - Allocate pages from high memory.
+ *
+ * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ *
+ * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ *
+ * Also it is possible to set different flags by OR'ing
+ * in one or more of the following additional @flags:
+ *
+ * %__GFP_COLD - Request cache-cold pages instead of
+ * trying to return cache-warm pages.
+ *
+ * %__GFP_DMA - Request memory from the DMA-capable zone.
+ *
+ * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ *
+ * %__GFP_HIGHMEM - Allocated memory may be from highmem.
+ *
+ * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
+ * (think twice before using).
+ *
+ * %__GFP_NORETRY - If memory is not immediately available,
+ * then give up at once.
+ *
+ * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ *
+ * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ */
static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
@@ -111,6 +156,11 @@ found:
extern void *__kzalloc(size_t, gfp_t);
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
static inline void *kzalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
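/*
 * Short usage sketch for the kmalloc()/kzalloc() flags documented above;
 * the structure and helper are invented for the example.
 */
#include <linux/slab.h>

struct widget {
	int id;
	char name[32];
};

static struct widget *widget_alloc(int id, int atomic_context)
{
	/* GFP_ATOMIC from contexts that must not sleep, GFP_KERNEL otherwise */
	struct widget *w = kzalloc(sizeof(*w),
				   atomic_context ? GFP_ATOMIC : GFP_KERNEL);

	if (!w)
		return NULL;
	w->id = id;
	return w;
}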
diff --git a/include/linux/string.h b/include/linux/string.h
index c61306da8c5..e4c75586031 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -56,6 +56,7 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
+extern char * strstrip(char *);
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 96e31aa64cc..e82cb10fb3e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -71,6 +71,7 @@ struct saved_context;
void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt);
unsigned long get_safe_page(gfp_t gfp_mask);
+int swsusp_add_arch_pages(unsigned long start, unsigned long end);
/*
* XXX: We try to keep some more pages free so that I/O operations succeed
diff --git a/include/linux/swap.h b/include/linux/swap.h
index aca9bfae208..dc3f3aa0c83 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -28,7 +28,14 @@ static inline int current_is_kswapd(void)
* the type/offset into the pte as 5/27 as well.
*/
#define MAX_SWAPFILES_SHIFT 5
+#ifndef CONFIG_MIGRATION
#define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT)
+#else
+/* Use last two entries for page migration swap entries */
+#define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2)
+#define SWP_MIGRATION_READ MAX_SWAPFILES
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1)
+#endif
/*
* Magic header for a swap area. The first part of the union is
@@ -48,12 +55,14 @@ union swap_header {
char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */
} magic;
struct {
- char bootbits[1024]; /* Space for disklabel etc. */
- unsigned int version;
- unsigned int last_page;
- unsigned int nr_badpages;
- unsigned int padding[125];
- unsigned int badpages[1];
+ char bootbits[1024]; /* Space for disklabel etc. */
+ __u32 version;
+ __u32 last_page;
+ __u32 nr_badpages;
+ unsigned char sws_uuid[16];
+ unsigned char sws_volume[16];
+ __u32 padding[117];
+ __u32 badpages[1];
} info;
};
@@ -176,20 +185,7 @@ extern unsigned long try_to_free_pages(struct zone **, gfp_t);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
-
-/* possible outcome of pageout() */
-typedef enum {
- /* failed to write page out, page is locked */
- PAGE_KEEP,
- /* move page to the active list, page is locked */
- PAGE_ACTIVATE,
- /* page has been sent to the disk successfully, page is unlocked */
- PAGE_SUCCESS,
- /* page is clean and locked */
- PAGE_CLEAN,
-} pageout_t;
-
-extern pageout_t pageout(struct page *page, struct address_space *mapping);
+extern long vm_total_pages;
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
@@ -250,7 +246,6 @@ extern int remove_exclusive_swap_page(struct page *);
struct backing_dev_info;
extern spinlock_t swap_lock;
-extern int remove_vma_swap(struct vm_area_struct *vma, struct page *page);
/* linux/mm/thrash.c */
extern struct mm_struct * swap_token_mm;
@@ -288,18 +283,60 @@ static inline void disable_swap_token(void)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr), 0);
-#define show_swap_cache_info() /*NOTHING*/
-#define free_swap_and_cache(swp) /*NOTHING*/
-#define swap_duplicate(swp) /*NOTHING*/
-#define swap_free(swp) /*NOTHING*/
-#define read_swap_cache_async(swp,vma,addr) NULL
-#define lookup_swap_cache(swp) NULL
-#define valid_swaphandles(swp, off) 0
+static inline void show_swap_cache_info(void)
+{
+}
+
+static inline void free_swap_and_cache(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
+}
+
+static inline void swap_free(swp_entry_t swp)
+{
+}
+
+static inline struct page *read_swap_cache_async(swp_entry_t swp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return NULL;
+}
+
+static inline struct page *lookup_swap_cache(swp_entry_t swp)
+{
+ return NULL;
+}
+
+static inline int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
+{
+ return 0;
+}
+
#define can_share_swap_page(p) (page_mapcount(p) == 1)
-#define move_to_swap_cache(p, swp) 1
-#define move_from_swap_cache(p, i, m) 1
-#define __delete_from_swap_cache(p) /*NOTHING*/
-#define delete_from_swap_cache(p) /*NOTHING*/
+
+static inline int move_to_swap_cache(struct page *page, swp_entry_t entry)
+{
+ return 1;
+}
+
+static inline int move_from_swap_cache(struct page *page, unsigned long index,
+ struct address_space *mapping)
+{
+ return 1;
+}
+
+static inline void __delete_from_swap_cache(struct page *page)
+{
+}
+
+static inline void delete_from_swap_cache(struct page *page)
+{
+}
+
#define swap_token_default_timeout 0
static inline int remove_exclusive_swap_page(struct page *p)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 87b9d14c710..ec639aa3a1d 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,3 +67,56 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
BUG_ON(pte_file(__swp_entry_to_pte(arch_entry)));
return __swp_entry_to_pte(arch_entry);
}
+
+#ifdef CONFIG_MIGRATION
+static inline swp_entry_t make_migration_entry(struct page *page, int write)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
+ page_to_pfn(page));
+}
+
+static inline int is_migration_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
+ swp_type(entry) == SWP_MIGRATION_WRITE);
+}
+
+static inline int is_write_migration_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
+}
+
+static inline struct page *migration_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(!PageLocked(p));
+ return p;
+}
+
+static inline void make_migration_entry_read(swp_entry_t *entry)
+{
+ *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+}
+
+extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address);
+#else
+
+#define make_migration_entry(page, write) swp_entry(0, 0)
+#define is_migration_entry(swp) 0
+#define migration_entry_to_page(swp) NULL
+static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address) { }
+static inline int is_write_migration_entry(swp_entry_t entry)
+{
+ return 0;
+}
+
+#endif
+
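/*
 * Sketch of how a fault path recognises the new migration swap entries;
 * this mirrors the helpers above, but the wrapper function is hypothetical.
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>

static int wait_if_migration_entry(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long address, pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (is_migration_entry(entry)) {
		/* sleep until the page has been migrated and the pte rewritten */
		migration_entry_wait(mm, pmd, address);
		return 1;
	}
	return 0;
}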
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bd67a4413df..33785b79d54 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -516,6 +516,16 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
const unsigned long __user *from,
const unsigned long __user *to);
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+ const void __user * __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
+asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
+ __u32 __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
asmlinkage long sys_mbind(unsigned long start, unsigned long len,
unsigned long mode,
unsigned long __user *nmask,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index cee944dbdcd..c7132029af0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -186,6 +186,7 @@ enum
VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
+ VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
};
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
new file mode 100644
index 00000000000..391e7ed1eb3
--- /dev/null
+++ b/include/linux/uaccess.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_UACCESS_H__
+#define __LINUX_UACCESS_H__
+
+#include <asm/uaccess.h>
+
+#ifndef ARCH_HAS_NOCACHE_UACCESS
+
+static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ return __copy_from_user_inatomic(to, from, n);
+}
+
+static inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ return __copy_from_user(to, from, n);
+}
+
+#endif /* ARCH_HAS_NOCACHE_UACCESS */
+
+#endif /* __LINUX_UACCESS_H__ */
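/*
 * Sketch of a caller of the new nocache copy helpers; on architectures
 * without ARCH_HAS_NOCACHE_UACCESS this falls back to __copy_from_user().
 * The wrapper assumes access_ok() has already been done by the caller.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

static int copy_user_block(void *dst, const void __user *src, unsigned long len)
{
	if (__copy_from_user_nocache(dst, src, len))
		return -EFAULT;
	return 0;
}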
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1d5577b2b75..f6024ab4eff 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,10 +4,13 @@
#include <linux/spinlock.h>
#include <asm/page.h> /* pgprot_t */
+struct vm_area_struct;
+
/* bits in vm_struct->flags */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
+#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -32,9 +35,11 @@ struct vm_struct {
* Highlevel APIs for driver use
*/
extern void *vmalloc(unsigned long size);
+extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
+extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot);
@@ -45,6 +50,9 @@ extern void vfree(void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
extern void vunmap(void *addr);
+
+extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff);
/*
* Lowlevel-APIs (not for driver use!)
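/*
 * Sketch of the new vmalloc_user()/remap_vmalloc_range() pairing in a
 * driver mmap() handler; the driver is hypothetical, and shared_buf is
 * assumed to have been allocated with vmalloc_user() beforehand.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *shared_buf;

static int exdrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* map the zeroed, user-safe buffer into the calling process */
	return remap_vmalloc_range(vma, shared_buf, vma->vm_pgoff);
}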
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 56f92fcbe94..9e38b566d0e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -50,14 +50,15 @@ struct writeback_control {
* a hint that the filesystem need only write out the pages inside that
* byterange. The byte at `end' is included in the writeout request.
*/
- loff_t start;
- loff_t end;
+ loff_t range_start;
+ loff_t range_end;
unsigned nonblocking:1; /* Don't get stuck on request queues */
unsigned encountered_congestion:1; /* An output: a queue is full */
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned for_writepages:1; /* This is a writepages() call */
+ unsigned range_cyclic:1; /* range_start is cyclic */
};
/*
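/*
 * Sketch of filling in the renamed writeback_control range fields; the
 * helper is hypothetical and simply writes back a whole mapping.
 */
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/kernel.h>

static int sync_whole_mapping(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
		.range_start	= 0,
		.range_end	= LLONG_MAX,	/* whole file, not range_cyclic */
	};

	return do_writepages(mapping, &wbc);
}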
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index 2f135cf6eef..913bfc226dd 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -11,8 +11,6 @@
#ifndef _LINUX_ZORRO_H
#define _LINUX_ZORRO_H
-#ifndef __ASSEMBLY__
-
#include <linux/device.h>
@@ -112,45 +110,6 @@ struct ConfigDev {
__u32 cd_Unused[4]; /* for whatever the driver wants */
} __attribute__ ((packed));
-#else /* __ASSEMBLY__ */
-
-LN_Succ = 0
-LN_Pred = LN_Succ+4
-LN_Type = LN_Pred+4
-LN_Pri = LN_Type+1
-LN_Name = LN_Pri+1
-LN_sizeof = LN_Name+4
-
-ER_Type = 0
-ER_Product = ER_Type+1
-ER_Flags = ER_Product+1
-ER_Reserved03 = ER_Flags+1
-ER_Manufacturer = ER_Reserved03+1
-ER_SerialNumber = ER_Manufacturer+2
-ER_InitDiagVec = ER_SerialNumber+4
-ER_Reserved0c = ER_InitDiagVec+2
-ER_Reserved0d = ER_Reserved0c+1
-ER_Reserved0e = ER_Reserved0d+1
-ER_Reserved0f = ER_Reserved0e+1
-ER_sizeof = ER_Reserved0f+1
-
-CD_Node = 0
-CD_Flags = CD_Node+LN_sizeof
-CD_Pad = CD_Flags+1
-CD_Rom = CD_Pad+1
-CD_BoardAddr = CD_Rom+ER_sizeof
-CD_BoardSize = CD_BoardAddr+4
-CD_SlotAddr = CD_BoardSize+4
-CD_SlotSize = CD_SlotAddr+2
-CD_Driver = CD_SlotSize+2
-CD_NextCD = CD_Driver+4
-CD_Unused = CD_NextCD+4
-CD_sizeof = CD_Unused+(4*4)
-
-#endif /* __ASSEMBLY__ */
-
-#ifndef __ASSEMBLY__
-
#define ZORRO_NUM_AUTO 16
#ifdef __KERNEL__
@@ -290,7 +249,6 @@ extern DECLARE_BITMAP(zorro_unused_z2ram, 128);
#define Z2RAM_CHUNKSHIFT (16)
-#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_ZORRO_H */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 1511714a958..02e6f679897 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -205,11 +205,11 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static struct super_block *mqueue_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int mqueue_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, mqueue_fill_super);
+ return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}
static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
@@ -359,7 +359,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
return count;
}
-static int mqueue_flush_file(struct file *filp)
+static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
diff --git a/ipc/shm.c b/ipc/shm.c
index 4f133d24030..fe7ae73b698 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -698,7 +698,6 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
int err;
unsigned long flags;
unsigned long prot;
- unsigned long o_flags;
int acc_mode;
void *user_addr;
@@ -725,11 +724,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
if (shmflg & SHM_RDONLY) {
prot = PROT_READ;
- o_flags = O_RDONLY;
acc_mode = S_IRUGO;
} else {
prot = PROT_READ | PROT_WRITE;
- o_flags = O_RDWR;
acc_mode = S_IRUGO | S_IWUGO;
}
if (shmflg & SHM_EXEC) {
diff --git a/kernel/acct.c b/kernel/acct.c
index b327f4d2010..6802020e0ce 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -118,7 +118,7 @@ static int check_free_space(struct file *file)
spin_unlock(&acct_globals.lock);
/* May block */
- if (vfs_statfs(file->f_dentry->d_inode->i_sb, &sbuf))
+ if (vfs_statfs(file->f_dentry, &sbuf))
return res;
suspend = sbuf.f_blocks * SUSPEND;
resume = sbuf.f_blocks * RESUME;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b097ccb4eb7..9ebd96fda29 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1558,6 +1558,7 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
* @uid: msgq user id
* @gid: msgq group id
* @mode: msgq mode (permissions)
+ * @ipcp: in-kernel IPC permissions
*
* Returns 0 for success or NULL context or < 0 on error.
*/
diff --git a/kernel/compat.c b/kernel/compat.c
index c1601a84f8d..2f672332430 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -21,6 +21,7 @@
#include <linux/unistd.h>
#include <linux/security.h>
#include <linux/timex.h>
+#include <linux/migrate.h>
#include <asm/uaccess.h>
@@ -934,3 +935,25 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
return ret;
}
+
+#ifdef CONFIG_NUMA
+asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
+ compat_uptr_t __user *pages32,
+ const int __user *nodes,
+ int __user *status,
+ int flags)
+{
+ const void __user * __user *pages;
+ int i;
+
+ pages = compat_alloc_user_space(nr_pages * sizeof(void *));
+ for (i = 0; i < nr_pages; i++) {
+ compat_uptr_t p;
+
+ if (get_user(p, pages32 + i) ||
+ put_user(compat_ptr(p), pages + i))
+ return -EFAULT;
+ }
+ return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
+}
+#endif
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ab81fdd4572..b602f73fb38 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -41,6 +41,7 @@
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
@@ -392,11 +393,11 @@ static int cpuset_fill_super(struct super_block *sb, void *unused_data,
return 0;
}
-static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
- int flags, const char *unused_dev_name,
- void *data)
+static int cpuset_get_sb(struct file_system_type *fs_type,
+ int flags, const char *unused_dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, cpuset_fill_super);
+ return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
}
static struct file_system_type cpuset_fs_type = {
@@ -1177,6 +1178,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
cpumask_t cpus;
nodemask_t from, to;
struct mm_struct *mm;
+ int retval;
if (sscanf(pidbuf, "%d", &pid) != 1)
return -EIO;
@@ -1205,6 +1207,12 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
get_task_struct(tsk);
}
+ retval = security_task_setscheduler(tsk, 0, NULL);
+ if (retval) {
+ put_task_struct(tsk);
+ return retval;
+ }
+
mutex_lock(&callback_mutex);
task_lock(tsk);
diff --git a/kernel/exit.c b/kernel/exit.c
index e06d0c10a24..a3baf92462b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -579,7 +579,7 @@ static void exit_mm(struct task_struct * tsk)
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
- if (mm != tsk->active_mm) BUG();
+ BUG_ON(mm != tsk->active_mm);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
@@ -1530,8 +1530,7 @@ check_continued:
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
- if (tsk->signal != current->signal)
- BUG();
+ BUG_ON(tsk->signal != current->signal);
} while (tsk != current);
read_unlock(&tasklist_lock);
diff --git a/kernel/fork.c b/kernel/fork.c
index ac8100e3088..49adc0e8d47 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -368,6 +368,8 @@ void fastcall __mmdrop(struct mm_struct *mm)
*/
void mmput(struct mm_struct *mm)
{
+ might_sleep();
+
if (atomic_dec_and_test(&mm->mm_users)) {
exit_aio(mm);
exit_mmap(mm);
@@ -623,6 +625,7 @@ out:
/*
* Allocate a new files structure and copy contents from the
* passed in files structure.
+ * errorp will be valid only when the returned files_struct is NULL.
*/
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
@@ -631,6 +634,7 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
int open_files, size, i, expand;
struct fdtable *old_fdt, *new_fdt;
+ *errorp = -ENOMEM;
newf = alloc_files();
if (!newf)
goto out;
@@ -744,7 +748,6 @@ static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
* break this.
*/
tsk->files = NULL;
- error = -ENOMEM;
newf = dup_fd(oldf, &error);
if (!newf)
goto out;
diff --git a/kernel/futex.c b/kernel/futex.c
index 5699c512057..e1a380c77a5 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1056,11 +1056,11 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
(unsigned long)uaddr2, val2, val3);
}
-static struct super_block *
-futexfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int futexfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA);
+ return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
}
static struct file_system_type futex_fs_type = {
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 51df337b37d..0f653011710 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -76,10 +76,11 @@ irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
/*
* Have got an event to handle:
*/
-fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
struct irqaction *action)
{
- int ret, retval = 0, status = 0;
+ irqreturn_t ret, retval = IRQ_NONE;
+ unsigned int status = 0;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
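Aside, not part of the patch: handle_IRQ_event() now returns irqreturn_t rather than int, matching what driver handlers already return. A hedged sketch of a handler on the receiving end of that convention; the example_* helpers and device structure are invented:

#include <linux/interrupt.h>

/* Illustrative driver ISR: IRQ_NONE lets note_interrupt() account an
 * unhandled interrupt, IRQ_HANDLED marks it as ours. */
static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_dev *dev = dev_id;

	if (!example_irq_pending(dev))
		return IRQ_NONE;	/* not our device */

	example_ack_irq(dev);
	return IRQ_HANDLED;
}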
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 134f9f2e0e3..a12d00eb5e7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -30,7 +30,7 @@ void move_native_irq(int irq)
desc->move_irq = 0;
- if (likely(cpus_empty(pending_irq_cpumask[irq])))
+ if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
return;
if (!desc->handler->set_affinity)
@@ -49,7 +49,7 @@ void move_native_irq(int irq)
* cause some ioapics to mal-function.
* Being paranoid i guess!
*/
- if (unlikely(!cpus_empty(tmp))) {
+ if (likely(!cpus_empty(tmp))) {
if (likely(!(desc->status & IRQ_DISABLED)))
desc->handler->disable(irq);
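Aside, not part of the patch: the two hunks above only correct inverted branch-prediction hints; the behaviour of move_native_irq() is unchanged. For reference, the usual definitions (roughly, from include/linux/compiler.h):

/* These only pass a prediction hint to gcc; they never change the result
 * of the condition. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)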
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d03b5eef8ce..afacd6f585f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -24,6 +24,8 @@ static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
#ifdef CONFIG_GENERIC_PENDING_IRQ
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
+ set_balance_irq_affinity(irq, mask_val);
+
/*
* Save these away for later use. Re-program when the

* interrupt is pending
@@ -33,6 +35,7 @@ void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
#else
void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
{
+ set_balance_irq_affinity(irq, mask_val);
irq_affinity[irq] = mask_val;
irq_desc[irq].handler->set_affinity(irq, mask_val);
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 7df9abd5ec8..b2fb3c18d06 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -11,7 +11,7 @@
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
-static int irqfixup;
+static int irqfixup __read_mostly;
/*
* Recovery handler for misrouted interrupts.
@@ -136,9 +136,9 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
struct pt_regs *regs)
{
- if (action_ret != IRQ_HANDLED) {
+ if (unlikely(action_ret != IRQ_HANDLED)) {
desc->irqs_unhandled++;
- if (action_ret != IRQ_NONE)
+ if (unlikely(action_ret != IRQ_NONE))
report_bad_irq(irq, desc, action_ret);
}
@@ -152,11 +152,11 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
}
desc->irq_count++;
- if (desc->irq_count < 100000)
+ if (likely(desc->irq_count < 100000))
return;
desc->irq_count = 0;
- if (desc->irqs_unhandled > 99900) {
+ if (unlikely(desc->irqs_unhandled > 99900)) {
/*
* The interrupt is stuck
*/
@@ -171,7 +171,7 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
desc->irqs_unhandled = 0;
}
-int noirqdebug;
+int noirqdebug __read_mostly;
int __init noirqdebug_setup(char *str)
{
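Aside, not part of the patch: __read_mostly only changes where the variable is placed. A hedged sketch of what it expands to (the exact definition varies by architecture and kernel version):

/* Rarely-written, frequently-read flags such as irqfixup and noirqdebug are
 * moved into a separate section so they do not share cache lines with
 * frequently-written data. */
#define __read_mostly __attribute__((__section__(".data.read_mostly")))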
diff --git a/kernel/kexec.c b/kernel/kexec.c
index bf39d28e4c0..58f0f382597 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -902,14 +902,14 @@ static int kimage_load_segment(struct kimage *image,
* kexec does not sync, or unmount filesystems so if you need
* that to happen you need to do that yourself.
*/
-struct kimage *kexec_image = NULL;
-static struct kimage *kexec_crash_image = NULL;
+struct kimage *kexec_image;
+struct kimage *kexec_crash_image;
/*
* A home grown binary mutex.
* Nothing can wait so this mutex is safe to use
* in interrupt context :)
*/
-static int kexec_lock = 0;
+static int kexec_lock;
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
struct kexec_segment __user *segments,
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index f119e098e67..9e28478a17a 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -14,6 +14,7 @@
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kexec.h>
#define KERNEL_ATTR_RO(_name) \
static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
@@ -48,6 +49,20 @@ static ssize_t uevent_helper_store(struct subsystem *subsys, const char *page, s
KERNEL_ATTR_RW(uevent_helper);
#endif
+#ifdef CONFIG_KEXEC
+static ssize_t kexec_loaded_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%d\n", !!kexec_image);
+}
+KERNEL_ATTR_RO(kexec_loaded);
+
+static ssize_t kexec_crash_loaded_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%d\n", !!kexec_crash_image);
+}
+KERNEL_ATTR_RO(kexec_crash_loaded);
+#endif /* CONFIG_KEXEC */
+
decl_subsys(kernel, NULL, NULL);
EXPORT_SYMBOL_GPL(kernel_subsys);
@@ -56,6 +71,10 @@ static struct attribute * kernel_attrs[] = {
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
+#ifdef CONFIG_KEXEC
+ &kexec_loaded_attr.attr,
+ &kexec_crash_loaded_attr.attr,
+#endif
NULL
};
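Aside, not part of the patch: a userspace sketch reading the new attributes. The /sys/kernel/ paths follow from decl_subsys(kernel, ...) above, but treat them as an assumption here:

#include <stdio.h>

int main(void)
{
	char v = '0';
	FILE *f = fopen("/sys/kernel/kexec_crash_loaded", "r");

	if (f) {
		v = fgetc(f);	/* '1' if a crash kernel is loaded */
		fclose(f);
	}
	printf("crash kernel loaded: %c\n", v);
	return 0;
}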
diff --git a/kernel/power/power.h b/kernel/power/power.h
index f06f12f2176..98c41423f3b 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -55,7 +55,7 @@ struct snapshot_handle {
unsigned int page;
unsigned int page_offset;
unsigned int prev;
- struct pbe *pbe;
+ struct pbe *pbe, *last_pbe;
void *buffer;
unsigned int buf_offset;
};
@@ -105,6 +105,10 @@ extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
+extern unsigned int count_special_pages(void);
+extern int save_special_mem(void);
+extern int restore_special_mem(void);
+
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 3eeedbb13b7..3d9284100b2 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,8 +39,90 @@ static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
+struct arch_saveable_page {
+ unsigned long start;
+ unsigned long end;
+ char *data;
+ struct arch_saveable_page *next;
+};
+static struct arch_saveable_page *arch_pages;
+
+int swsusp_add_arch_pages(unsigned long start, unsigned long end)
+{
+ struct arch_saveable_page *tmp;
+
+ while (start < end) {
+ tmp = kzalloc(sizeof(struct arch_saveable_page), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ tmp->start = start;
+ tmp->end = ((start >> PAGE_SHIFT) + 1) << PAGE_SHIFT;
+ if (tmp->end > end)
+ tmp->end = end;
+ tmp->next = arch_pages;
+ start = tmp->end;
+ arch_pages = tmp;
+ }
+ return 0;
+}
+
+static unsigned int count_arch_pages(void)
+{
+ unsigned int count = 0;
+ struct arch_saveable_page *tmp = arch_pages;
+ while (tmp) {
+ count++;
+ tmp = tmp->next;
+ }
+ return count;
+}
+
+static int save_arch_mem(void)
+{
+ char *kaddr;
+ struct arch_saveable_page *tmp = arch_pages;
+ int offset;
+
+ pr_debug("swsusp: Saving arch specific memory");
+ while (tmp) {
+ tmp->data = (char *)__get_free_page(GFP_ATOMIC);
+ if (!tmp->data)
+ return -ENOMEM;
+ offset = tmp->start - (tmp->start & PAGE_MASK);
+ /* arch pages might not have a 'struct page' */
+ kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
+ memcpy(tmp->data + offset, kaddr + offset,
+ tmp->end - tmp->start);
+ kunmap_atomic(kaddr, KM_USER0);
+
+ tmp = tmp->next;
+ }
+ return 0;
+}
+
+static int restore_arch_mem(void)
+{
+ char *kaddr;
+ struct arch_saveable_page *tmp = arch_pages;
+ int offset;
+
+ while (tmp) {
+ if (!tmp->data) {
+ tmp = tmp->next;
+ continue;
+ }
+ offset = tmp->start - (tmp->start & PAGE_MASK);
+ kaddr = kmap_atomic_pfn(tmp->start >> PAGE_SHIFT, KM_USER0);
+ memcpy(kaddr + offset, tmp->data + offset,
+ tmp->end - tmp->start);
+ kunmap_atomic(kaddr, KM_USER0);
+ free_page((long)tmp->data);
+ tmp->data = NULL;
+ tmp = tmp->next;
+ }
+ return 0;
+}
+
#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void)
+static unsigned int count_highmem_pages(void)
{
struct zone *zone;
unsigned long zone_pfn;
@@ -117,7 +199,7 @@ static int save_highmem_zone(struct zone *zone)
return 0;
}
-int save_highmem(void)
+static int save_highmem(void)
{
struct zone *zone;
int res = 0;
@@ -134,7 +216,7 @@ int save_highmem(void)
return 0;
}
-int restore_highmem(void)
+static int restore_highmem(void)
{
printk("swsusp: Restoring Highmem\n");
while (highmem_copy) {
@@ -150,8 +232,35 @@ int restore_highmem(void)
}
return 0;
}
+#else
+static inline unsigned int count_highmem_pages(void) {return 0;}
+static inline int save_highmem(void) {return 0;}
+static inline int restore_highmem(void) {return 0;}
#endif
+unsigned int count_special_pages(void)
+{
+ return count_arch_pages() + count_highmem_pages();
+}
+
+int save_special_mem(void)
+{
+ int ret;
+ ret = save_arch_mem();
+ if (!ret)
+ ret = save_highmem();
+ return ret;
+}
+
+int restore_special_mem(void)
+{
+ int ret;
+ ret = restore_arch_mem();
+ if (!ret)
+ ret = restore_highmem();
+ return ret;
+}
+
static int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
@@ -177,7 +286,6 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
return 0;
page = pfn_to_page(pfn);
- BUG_ON(PageReserved(page) && PageNosave(page));
if (PageNosave(page))
return 0;
if (PageReserved(page) && pfn_is_nosave(pfn))
@@ -293,62 +401,29 @@ static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
}
}
-/**
- * On resume it is necessary to trace and eventually free the unsafe
- * pages that have been allocated, because they are needed for I/O
- * (on x86-64 we likely will "eat" these pages once again while
- * creating the temporary page translation tables)
- */
-
-struct eaten_page {
- struct eaten_page *next;
- char padding[PAGE_SIZE - sizeof(void *)];
-};
-
-static struct eaten_page *eaten_pages = NULL;
-
-static void release_eaten_pages(void)
-{
- struct eaten_page *p, *q;
-
- p = eaten_pages;
- while (p) {
- q = p->next;
- /* We don't want swsusp_free() to free this page again */
- ClearPageNosave(virt_to_page(p));
- free_page((unsigned long)p);
- p = q;
- }
- eaten_pages = NULL;
-}
+static unsigned int unsafe_pages;
/**
* @safe_needed - on resume, for storing the PBE list and the image,
* we can only use memory pages that do not conflict with the pages
- * which had been used before suspend.
+ * used before suspend.
*
* The unsafe pages are marked with the PG_nosave_free flag
- *
- * Allocated but unusable (ie eaten) memory pages should be marked
- * so that swsusp_free() can release them
+ * and we count them using unsafe_pages
*/
static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
void *res;
+ res = (void *)get_zeroed_page(gfp_mask);
if (safe_needed)
- do {
+ while (res && PageNosaveFree(virt_to_page(res))) {
+ /* The page is unsafe, mark it for swsusp_free() */
+ SetPageNosave(virt_to_page(res));
+ unsafe_pages++;
res = (void *)get_zeroed_page(gfp_mask);
- if (res && PageNosaveFree(virt_to_page(res))) {
- /* This is for swsusp_free() */
- SetPageNosave(virt_to_page(res));
- ((struct eaten_page *)res)->next = eaten_pages;
- eaten_pages = res;
- }
- } while (res && PageNosaveFree(virt_to_page(res)));
- else
- res = (void *)get_zeroed_page(gfp_mask);
+ }
if (res) {
SetPageNosave(virt_to_page(res));
SetPageNosaveFree(virt_to_page(res));
@@ -374,7 +449,8 @@ unsigned long get_safe_page(gfp_t gfp_mask)
* On each page we set up a list of struct_pbe elements.
*/
-struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
+static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
+ int safe_needed)
{
unsigned int num;
struct pbe *pblist, *pbe;
@@ -642,6 +718,8 @@ static int mark_unsafe_pages(struct pbe *pblist)
return -EFAULT;
}
+ unsafe_pages = 0;
+
return 0;
}
@@ -719,42 +797,99 @@ static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
}
/**
- * create_image - use metadata contained in the PBE list
+ * prepare_image - use metadata contained in the PBE list
* pointed to by pagedir_nosave to mark the pages that will
* be overwritten in the process of restoring the system
- * memory state from the image and allocate memory for
- * the image avoiding these pages
+ * memory state from the image ("unsafe" pages) and allocate
+ * memory for the image
+ *
+ * The idea is to allocate the PBE list first and then
+ * allocate as many pages as are needed for the image data,
+ * but not to assign these pages to the PBEs initially.
+ * Instead, we just mark them as allocated and create a list
+ * of "safe" pages which will be used later
*/
-static int create_image(struct snapshot_handle *handle)
+struct safe_page {
+ struct safe_page *next;
+ char padding[PAGE_SIZE - sizeof(void *)];
+};
+
+static struct safe_page *safe_pages;
+
+static int prepare_image(struct snapshot_handle *handle)
{
int error = 0;
- struct pbe *p, *pblist;
+ unsigned int nr_pages = nr_copy_pages;
+ struct pbe *p, *pblist = NULL;
p = pagedir_nosave;
error = mark_unsafe_pages(p);
if (!error) {
- pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
+ pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
if (pblist)
copy_page_backup_list(pblist, p);
free_pagedir(p, 0);
if (!pblist)
error = -ENOMEM;
}
- if (!error)
- error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
+ safe_pages = NULL;
+ if (!error && nr_pages > unsafe_pages) {
+ nr_pages -= unsafe_pages;
+ while (nr_pages--) {
+ struct safe_page *ptr;
+
+ ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
+ if (!ptr) {
+ error = -ENOMEM;
+ break;
+ }
+ if (!PageNosaveFree(virt_to_page(ptr))) {
+ /* The page is "safe", add it to the list */
+ ptr->next = safe_pages;
+ safe_pages = ptr;
+ }
+ /* Mark the page as allocated */
+ SetPageNosave(virt_to_page(ptr));
+ SetPageNosaveFree(virt_to_page(ptr));
+ }
+ }
if (!error) {
- release_eaten_pages();
pagedir_nosave = pblist;
} else {
- pagedir_nosave = NULL;
handle->pbe = NULL;
- nr_copy_pages = 0;
- nr_meta_pages = 0;
+ swsusp_free();
}
return error;
}
+static void *get_buffer(struct snapshot_handle *handle)
+{
+ struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
+ struct page *page = virt_to_page(pbe->orig_address);
+
+ if (PageNosave(page) && PageNosaveFree(page)) {
+ /*
+ * We have allocated the "original" page frame and we can
+ * use it directly to store the read page
+ */
+ pbe->address = 0;
+ if (last && last->next)
+ last->next = NULL;
+ return (void *)pbe->orig_address;
+ }
+ /*
+ * The "original" page frame has not been allocated and we have to
+ * use a "safe" page frame to store the read page
+ */
+ pbe->address = (unsigned long)safe_pages;
+ safe_pages = safe_pages->next;
+ if (last)
+ last->next = pbe;
+ handle->last_pbe = pbe;
+ return (void *)pbe->address;
+}
+
/**
* snapshot_write_next - used for writing the system memory snapshot.
*
@@ -799,15 +934,16 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
} else if (handle->prev <= nr_meta_pages) {
handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
if (!handle->pbe) {
- error = create_image(handle);
+ error = prepare_image(handle);
if (error)
return error;
handle->pbe = pagedir_nosave;
- handle->buffer = (void *)handle->pbe->address;
+ handle->last_pbe = NULL;
+ handle->buffer = get_buffer(handle);
}
} else {
handle->pbe = handle->pbe->next;
- handle->buffer = (void *)handle->pbe->address;
+ handle->buffer = get_buffer(handle);
}
handle->prev = handle->page;
}
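Aside, not part of the patch: swsusp_add_arch_pages() lets architecture code register address ranges that have no struct page backing but still need to be saved and restored around suspend. A hypothetical caller; the arch_fw_start/arch_fw_end symbols are invented for illustration:

/* Register a firmware-owned range so save_special_mem() copies it and
 * restore_special_mem() puts it back on resume. */
static int __init register_arch_saveable_region(void)
{
	return swsusp_add_arch_pages(arch_fw_start, arch_fw_end);
}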
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index c4016cbbd3e..f0ee4e7780d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -62,16 +62,6 @@ unsigned long image_size = 500 * 1024 * 1024;
int in_suspend __nosavedata = 0;
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
-int save_highmem(void);
-int restore_highmem(void);
-#else
-static int save_highmem(void) { return 0; }
-static int restore_highmem(void) { return 0; }
-static unsigned int count_highmem_pages(void) { return 0; }
-#endif
-
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
@@ -175,6 +165,12 @@ void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
*/
#define SHRINK_BITE 10000
+static inline unsigned long __shrink_memory(long tmp)
+{
+ if (tmp > SHRINK_BITE)
+ tmp = SHRINK_BITE;
+ return shrink_all_memory(tmp);
+}
int swsusp_shrink_memory(void)
{
@@ -186,21 +182,23 @@ int swsusp_shrink_memory(void)
printk("Shrinking memory... ");
do {
- size = 2 * count_highmem_pages();
+ size = 2 * count_special_pages();
size += size / 50 + count_data_pages();
size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
PAGES_FOR_IO;
tmp = size;
for_each_zone (zone)
- if (!is_highmem(zone))
+ if (!is_highmem(zone) && populated_zone(zone)) {
tmp -= zone->free_pages;
+ tmp += zone->lowmem_reserve[ZONE_NORMAL];
+ }
if (tmp > 0) {
- tmp = shrink_all_memory(SHRINK_BITE);
+ tmp = __shrink_memory(tmp);
if (!tmp)
return -ENOMEM;
pages += tmp;
} else if (size > image_size / PAGE_SIZE) {
- tmp = shrink_all_memory(SHRINK_BITE);
+ tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
pages += tmp;
}
printk("\b%c", p[i++%4]);
@@ -228,7 +226,7 @@ int swsusp_suspend(void)
goto Enable_irqs;
}
- if ((error = save_highmem())) {
+ if ((error = save_special_mem())) {
printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
goto Restore_highmem;
}
@@ -239,7 +237,7 @@ int swsusp_suspend(void)
/* Restore control flow magically appears here */
restore_processor_state();
Restore_highmem:
- restore_highmem();
+ restore_special_mem();
device_power_up();
Enable_irqs:
local_irq_enable();
@@ -265,7 +263,7 @@ int swsusp_resume(void)
*/
swsusp_free();
restore_processor_state();
- restore_highmem();
+ restore_special_mem();
touch_softlockup_watchdog();
device_power_up();
local_irq_enable();
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2058f88c7bb..20e9710fc21 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -612,14 +612,6 @@ void synchronize_rcu(void)
wait_for_completion(&rcu.completion);
}
-/*
- * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
- */
-void synchronize_kernel(void)
-{
- synchronize_rcu();
-}
-
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
@@ -627,7 +619,6 @@ module_param(qlowmark, int, 0);
module_param(rsinterval, int, 0);
#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
-EXPORT_SYMBOL_GPL_FUTURE(call_rcu); /* WARNING: GPL-only in April 2006. */
-EXPORT_SYMBOL_GPL_FUTURE(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);
-EXPORT_SYMBOL_GPL_FUTURE(synchronize_kernel); /* WARNING: GPL-only in April 2006. */
diff --git a/kernel/sched.c b/kernel/sched.c
index c13f1bd2df7..5dbc4269447 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3886,6 +3886,10 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
!capable(CAP_SYS_NICE))
goto out_unlock;
+ retval = security_task_setscheduler(p, 0, NULL);
+ if (retval)
+ goto out_unlock;
+
cpus_allowed = cpuset_cpus_allowed(p);
cpus_and(new_mask, new_mask, cpus_allowed);
retval = set_cpus_allowed(p, new_mask);
@@ -3954,7 +3958,10 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
if (!p)
goto out_unlock;
- retval = 0;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
cpus_and(*mask, p->cpus_allowed, cpu_online_map);
out_unlock:
@@ -4046,6 +4053,9 @@ asmlinkage long sys_sched_yield(void)
static inline void __cond_resched(void)
{
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ __might_sleep(__FILE__, __LINE__);
+#endif
/*
* The BKS might be reacquired before we have dropped
* PREEMPT_ACTIVE, which could trigger a second
diff --git a/kernel/sys.c b/kernel/sys.c
index a57a00597ce..90930b28d2c 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -13,7 +13,6 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
-#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 5433195040f..6991bece67e 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -87,6 +87,7 @@ cond_syscall(sys_inotify_init);
cond_syscall(sys_inotify_add_watch);
cond_syscall(sys_inotify_rm_watch);
cond_syscall(sys_migrate_pages);
+cond_syscall(sys_move_pages);
cond_syscall(sys_chown16);
cond_syscall(sys_fchown16);
cond_syscall(sys_getegid16);
@@ -132,3 +133,4 @@ cond_syscall(sys_mincore);
cond_syscall(sys_madvise);
cond_syscall(sys_mremap);
cond_syscall(sys_remap_file_pages);
+cond_syscall(compat_sys_move_pages);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d656e61621..eb8bd214e7d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -59,6 +59,7 @@ extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
extern int C_A_D;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
+extern int sysctl_panic_on_oom;
extern int max_threads;
extern int sysrq_enabled;
extern int core_uses_pid;
@@ -398,7 +399,7 @@ static ctl_table kern_table[] = {
.strategy = &sysctl_string,
},
#endif
-#ifdef CONFIG_HOTPLUG
+#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
{
.ctl_name = KERN_HOTPLUG,
.procname = "hotplug",
@@ -702,6 +703,14 @@ static ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = VM_PANIC_ON_OOM,
+ .procname = "panic_on_oom",
+ .data = &sysctl_panic_on_oom,
+ .maxlen = sizeof(sysctl_panic_on_oom),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
diff --git a/kernel/timer.c b/kernel/timer.c
index 9e49deed468..f35b3939e93 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -383,23 +383,19 @@ EXPORT_SYMBOL(del_timer_sync);
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
/* cascade all the timers from tv up one level */
- struct list_head *head, *curr;
+ struct timer_list *timer, *tmp;
+ struct list_head tv_list;
+
+ list_replace_init(tv->vec + index, &tv_list);
- head = tv->vec + index;
- curr = head->next;
/*
- * We are removing _all_ timers from the list, so we don't have to
- * detach them individually, just clear the list afterwards.
+ * We are removing _all_ timers from the list, so we
+ * don't have to detach them individually.
*/
- while (curr != head) {
- struct timer_list *tmp;
-
- tmp = list_entry(curr, struct timer_list, entry);
- BUG_ON(tmp->base != base);
- curr = curr->next;
- internal_add_timer(base, tmp);
+ list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
+ BUG_ON(timer->base != base);
+ internal_add_timer(base, timer);
}
- INIT_LIST_HEAD(head);
return index;
}
@@ -419,10 +415,10 @@ static inline void __run_timers(tvec_base_t *base)
spin_lock_irq(&base->lock);
while (time_after_eq(jiffies, base->timer_jiffies)) {
- struct list_head work_list = LIST_HEAD_INIT(work_list);
+ struct list_head work_list;
struct list_head *head = &work_list;
int index = base->timer_jiffies & TVR_MASK;
-
+
/*
* Cascade timers:
*/
@@ -431,8 +427,8 @@ static inline void __run_timers(tvec_base_t *base)
(!cascade(base, &base->tv3, INDEX(1))) &&
!cascade(base, &base->tv4, INDEX(2)))
cascade(base, &base->tv5, INDEX(3));
- ++base->timer_jiffies;
- list_splice_init(base->tv1.vec + index, &work_list);
+ ++base->timer_jiffies;
+ list_replace_init(base->tv1.vec + index, &work_list);
while (!list_empty(head)) {
void (*fn)(unsigned long);
unsigned long data;
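Aside, not part of the patch: the rewritten cascade() relies on list_replace_init(), which moves every entry from one list head onto another in O(1) and reinitialises the old head. A hedged sketch of the idiom, mirroring the code above:

#include <linux/list.h>
#include <linux/timer.h>

/* Detach a whole timer bucket onto a private list, then walk the private
 * copy; the bucket head is empty and reusable immediately. */
static void requeue_bucket(tvec_base_t *base, struct list_head *bucket)
{
	struct timer_list *timer, *tmp;
	struct list_head work;

	list_replace_init(bucket, &work);
	list_for_each_entry_safe(timer, tmp, &work, entry)
		internal_add_timer(base, timer);
}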
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 880fb415a8f..740c5abceb0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -531,11 +531,11 @@ int current_is_keventd(void)
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
- LIST_HEAD(list);
+ struct list_head list;
struct work_struct *work;
spin_lock_irq(&cwq->lock);
- list_splice_init(&cwq->worklist, &list);
+ list_replace_init(&cwq->worklist, &list);
while (!list_empty(&list)) {
printk("Taking work for %s\n", wq->name);
diff --git a/lib/Makefile b/lib/Makefile
index b830c9a1554..79358ad1f11 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9ce0a6a3b85..71338b48e88 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -4,10 +4,6 @@
* Uses for this includes on-device special memory, uncached memory
* etc.
*
- * This code is based on the buddy allocator found in the sym53c8xx_2
- * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
- * and adapted for general purpose use.
- *
* Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
*
* This source code is licensed under the GNU General Public License,
@@ -15,172 +11,155 @@
*/
#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
#include <linux/genalloc.h>
-#include <asm/page.h>
-
-struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
- unsigned long (*fp)(struct gen_pool *),
- unsigned long data)
+/*
+ * Create a new special memory pool.
+ *
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
- struct gen_pool *poolp;
- unsigned long tmp;
- int i;
-
- /*
- * This is really an arbitrary limit, +10 is enough for
- * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a large limit
- * this can be increased without problems.
- */
- if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
- ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
- return NULL;
-
- if (!max_chunk_shift)
- max_chunk_shift = PAGE_SHIFT;
-
- poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
- if (!poolp)
- return NULL;
- memset(poolp, 0, sizeof(struct gen_pool));
- poolp->h = kmalloc(sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
- GFP_KERNEL);
- if (!poolp->h) {
- printk(KERN_WARNING "gen_pool_alloc() failed to allocate\n");
- kfree(poolp);
- return NULL;
- }
- memset(poolp->h, 0, sizeof(struct gen_pool_link) *
- (max_chunk_shift - ALLOC_MIN_SHIFT + 1));
-
- spin_lock_init(&poolp->lock);
- poolp->get_new_chunk = fp;
- poolp->max_chunk_shift = max_chunk_shift;
- poolp->private = data;
-
- for (i = 0; i < nr_chunks; i++) {
- tmp = poolp->get_new_chunk(poolp);
- printk(KERN_INFO "allocated %lx\n", tmp);
- if (!tmp)
- break;
- gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
- }
+ struct gen_pool *pool;
- return poolp;
+ pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+ if (pool != NULL) {
+ rwlock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->chunks);
+ pool->min_alloc_order = min_alloc_order;
+ }
+ return pool;
}
EXPORT_SYMBOL(gen_pool_create);
/*
- * Simple power of two buddy-like generic allocator.
- * Provides naturally aligned memory chunks.
+ * Add a new chunk of memory to the specified pool.
+ *
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ * allocated on, or -1
*/
-unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
+int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
+ int nid)
{
- int j, i, s, max_chunk_size;
- unsigned long a, flags;
- struct gen_pool_link *h = poolp->h;
+ struct gen_pool_chunk *chunk;
+ int nbits = size >> pool->min_alloc_order;
+ int nbytes = sizeof(struct gen_pool_chunk) +
+ (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ if (unlikely(chunk == NULL))
+ return -1;
- if (size > max_chunk_size)
- return 0;
+ memset(chunk, 0, nbytes);
+ spin_lock_init(&chunk->lock);
+ chunk->start_addr = addr;
+ chunk->end_addr = addr + size;
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- j = i -= ALLOC_MIN_SHIFT;
-
- spin_lock_irqsave(&poolp->lock, flags);
- while (!h[j].next) {
- if (s == max_chunk_size) {
- struct gen_pool_link *ptr;
- spin_unlock_irqrestore(&poolp->lock, flags);
- ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
- spin_lock_irqsave(&poolp->lock, flags);
- h[j].next = ptr;
- if (h[j].next)
- h[j].next->next = NULL;
- break;
- }
- j++;
- s <<= 1;
- }
- a = (unsigned long) h[j].next;
- if (a) {
- h[j].next = h[j].next->next;
- /*
- * This should be split into a seperate function doing
- * the chunk split in order to support custom
- * handling memory not physically accessible by host
- */
- while (j > i) {
- j -= 1;
- s >>= 1;
- h[j].next = (struct gen_pool_link *) (a + s);
- h[j].next->next = NULL;
- }
- }
- spin_unlock_irqrestore(&poolp->lock, flags);
- return a;
+ write_lock(&pool->lock);
+ list_add(&chunk->next_chunk, &pool->chunks);
+ write_unlock(&pool->lock);
+
+ return 0;
}
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_add);
/*
- * Counter-part of the generic allocator.
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
+ *
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
*/
-void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
+unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
- struct gen_pool_link *q;
- struct gen_pool_link *h = poolp->h;
- unsigned long a, b, flags;
- int i, s, max_chunk_size;
-
- max_chunk_size = 1 << poolp->max_chunk_shift;
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long addr, flags;
+ int order = pool->min_alloc_order;
+ int nbits, bit, start_bit, end_bit;
- if (size > max_chunk_size)
- return;
-
- size = max(size, 1 << ALLOC_MIN_SHIFT);
- i = fls(size - 1);
- s = 1 << i;
- i -= ALLOC_MIN_SHIFT;
-
- a = ptr;
+ if (size == 0)
+ return 0;
- spin_lock_irqsave(&poolp->lock, flags);
- while (1) {
- if (s == max_chunk_size) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
- break;
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ end_bit -= nbits + 1;
+
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = -1;
+ while (bit + 1 < end_bit) {
+ bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1);
+ if (bit >= end_bit)
+ break;
+
+ start_bit = bit;
+ if (nbits > 1) {
+ bit = find_next_bit(chunk->bits, bit + nbits,
+ bit + 1);
+ if (bit - start_bit < nbits)
+ continue;
+ }
+
+ addr = chunk->start_addr +
+ ((unsigned long)start_bit << order);
+ while (nbits--)
+ __set_bit(start_bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ read_unlock(&pool->lock);
+ return addr;
}
- b = a ^ s;
- q = &h[i];
+ spin_unlock_irqrestore(&chunk->lock, flags);
+ }
+ read_unlock(&pool->lock);
+ return 0;
+}
+EXPORT_SYMBOL(gen_pool_alloc);
- while (q->next && q->next != (struct gen_pool_link *)b)
- q = q->next;
- if (!q->next) {
- ((struct gen_pool_link *)a)->next = h[i].next;
- h[i].next = (struct gen_pool_link *)a;
+/*
+ * Free the specified memory back to the specified pool.
+ *
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ */
+void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+{
+ struct list_head *_chunk;
+ struct gen_pool_chunk *chunk;
+ unsigned long flags;
+ int order = pool->min_alloc_order;
+ int bit, nbits;
+
+ nbits = (size + (1UL << order) - 1) >> order;
+
+ read_lock(&pool->lock);
+ list_for_each(_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+
+ if (addr >= chunk->start_addr && addr < chunk->end_addr) {
+ BUG_ON(addr + size > chunk->end_addr);
+ spin_lock_irqsave(&chunk->lock, flags);
+ bit = (addr - chunk->start_addr) >> order;
+ while (nbits--)
+ __clear_bit(bit++, &chunk->bits);
+ spin_unlock_irqrestore(&chunk->lock, flags);
break;
}
- q->next = q->next->next;
- a = a & b;
- s <<= 1;
- i++;
}
- spin_unlock_irqrestore(&poolp->lock, flags);
+ BUG_ON(nbits > 0);
+ read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
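Aside, not part of the patch: a usage sketch of the rewritten genalloc API. The base address, region size and order below are invented — a pool over a 64 KiB chunk of special memory, handed out in 256-byte granules on any node:

#include <linux/genalloc.h>

static unsigned long example_use(unsigned long special_base)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(8, -1);		/* min_alloc_order 8 -> 256-byte granule */
	if (!pool || gen_pool_add(pool, special_base, 0x10000, -1) < 0)
		return 0;			/* 0x10000 = 64 KiB region */

	addr = gen_pool_alloc(pool, 1024);	/* first-fit; 0 means failure */
	if (addr)
		gen_pool_free(pool, addr, 1024);
	return addr;
}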
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 00000000000..850449080e1
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
+{
+ long count;
+ s32 *pcount;
+ int cpu = get_cpu();
+
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ count = *pcount + amount;
+ if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+ spin_lock(&fbc->lock);
+ fbc->count += count;
+ *pcount = 0;
+ spin_unlock(&fbc->lock);
+ } else {
+ *pcount = count;
+ }
+ put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ s64 ret;
+ int cpu;
+
+ spin_lock(&fbc->lock);
+ ret = fbc->count;
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+ spin_unlock(&fbc->lock);
+ return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
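Aside, not part of the patch: a usage sketch of the new counters. Updates go through percpu_counter_mod(), which only takes the shared spinlock once a CPU's local delta reaches FBC_BATCH; percpu_counter_sum() folds the per-cpu deltas back in for an exact but slower read. Initialisation of the counter is assumed to happen elsewhere with the existing percpu_counter helpers:

#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;	/* assumed initialised elsewhere */

static void widget_created(void)
{
	percpu_counter_mod(&nr_widgets, 1);	/* cheap, usually lock-free */
}

static s64 nr_widgets_exact(void)
{
	return percpu_counter_sum(&nr_widgets);	/* slow but accurate */
}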
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7097bb239e4..b32efae7688 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,7 @@
#ifdef __KERNEL__
-#define RADIX_TREE_MAP_SHIFT 6
+#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#else
#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
#endif
@@ -74,6 +74,11 @@ struct radix_tree_preload {
};
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
+{
+ return root->gfp_mask & __GFP_BITS_MASK;
+}
+
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -82,9 +87,10 @@ static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
struct radix_tree_node *ret;
+ gfp_t gfp_mask = root_gfp_mask(root);
- ret = kmem_cache_alloc(radix_tree_node_cachep, root->gfp_mask);
- if (ret == NULL && !(root->gfp_mask & __GFP_WAIT)) {
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
struct radix_tree_preload *rtp;
rtp = &__get_cpu_var(radix_tree_preloads);
@@ -152,6 +158,27 @@ static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
return test_bit(offset, node->tags[tag]);
}
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= ~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
/*
* Returns 1 if any slot in the node has this tag set.
* Otherwise returns 0.
@@ -182,7 +209,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_node *node;
unsigned int height;
- char tags[RADIX_TREE_MAX_TAGS];
int tag;
/* Figure out what the height should be. */
@@ -195,16 +221,6 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
goto out;
}
- /*
- * Prepare the tag status of the top-level node for propagation
- * into the newly-pushed top-level node(s)
- */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 0;
- if (any_tag_set(root->rnode, tag))
- tags[tag] = 1;
- }
-
do {
if (!(node = radix_tree_node_alloc(root)))
return -ENOMEM;
@@ -214,7 +230,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
/* Propagate the aggregated tag info into the new root */
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
+ if (root_tag_get(root, tag))
tag_set(node, tag, 0);
}
@@ -243,8 +259,7 @@ int radix_tree_insert(struct radix_tree_root *root,
int error;
/* Make sure the tree is high enough. */
- if ((!index && !root->rnode) ||
- index > radix_tree_maxindex(root->height)) {
+ if (index > radix_tree_maxindex(root->height)) {
error = radix_tree_extend(root, index);
if (error)
return error;
@@ -255,7 +270,7 @@ int radix_tree_insert(struct radix_tree_root *root,
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
- do {
+ while (height > 0) {
if (slot == NULL) {
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
@@ -273,16 +288,21 @@ int radix_tree_insert(struct radix_tree_root *root,
slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
- } while (height > 0);
+ }
if (slot != NULL)
return -EEXIST;
- BUG_ON(!node);
- node->count++;
- node->slots[offset] = item;
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
+ if (node) {
+ node->count++;
+ node->slots[offset] = item;
+ BUG_ON(tag_get(node, 0, offset));
+ BUG_ON(tag_get(node, 1, offset));
+ } else {
+ root->rnode = item;
+ BUG_ON(root_tag_get(root, 0));
+ BUG_ON(root_tag_get(root, 1));
+ }
return 0;
}
@@ -295,9 +315,13 @@ static inline void **__lookup_slot(struct radix_tree_root *root,
struct radix_tree_node **slot;
height = root->height;
+
if (index > radix_tree_maxindex(height))
return NULL;
+ if (height == 0 && root->rnode)
+ return (void **)&root->rnode;
+
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = &root->rnode;
@@ -365,11 +389,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
struct radix_tree_node *slot;
height = root->height;
- if (index > radix_tree_maxindex(height))
- return NULL;
+ BUG_ON(index > radix_tree_maxindex(height));
- shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
+ shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
while (height > 0) {
int offset;
@@ -383,6 +406,10 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
height--;
}
+ /* set the root's tag bit */
+ if (slot && !root_tag_get(root, tag))
+ root_tag_set(root, tag);
+
return slot;
}
EXPORT_SYMBOL(radix_tree_tag_set);
@@ -405,9 +432,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
height = root->height;
if (index > radix_tree_maxindex(height))
@@ -432,20 +458,24 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
height--;
}
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- do {
+ while (pathp->node) {
if (!tag_get(pathp->node, tag, pathp->offset))
goto out;
tag_clear(pathp->node, tag, pathp->offset);
if (any_tag_set(pathp->node, tag))
goto out;
pathp--;
- } while (pathp->node);
+ }
+
+ /* clear the root's tag bit */
+ if (root_tag_get(root, tag))
+ root_tag_clear(root, tag);
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_tag_clear);
@@ -458,9 +488,8 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
*
* Return values:
*
- * 0: tag not present
- * 1: tag present, set
- * -1: tag present, unset
+ * 0: tag not present or not set
+ * 1: tag set
*/
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag)
@@ -473,6 +502,13 @@ int radix_tree_tag_get(struct radix_tree_root *root,
if (index > radix_tree_maxindex(height))
return 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ if (height == 0)
+ return 1;
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -494,7 +530,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
int ret = tag_get(slot, tag, offset);
BUG_ON(ret && saw_unset_tag);
- return ret ? 1 : -1;
+ return ret;
}
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
@@ -514,8 +550,11 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
unsigned long i;
height = root->height;
- if (height == 0)
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
goto out;
+ }
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
@@ -603,10 +642,16 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
unsigned int height = root->height;
struct radix_tree_node *slot;
+ if (height == 0) {
+ if (root->rnode && index == 0)
+ results[nr_found++] = root->rnode;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
slot = root->rnode;
- while (height > 0) {
+ do {
unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
@@ -637,7 +682,7 @@ __lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
}
shift -= RADIX_TREE_MAP_SHIFT;
slot = slot->slots[i];
- }
+ } while (height > 0);
out:
*next_index = index;
return nr_found;
@@ -665,6 +710,10 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long cur_index = first_index;
unsigned int ret = 0;
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
while (ret < max_items) {
unsigned int nr_found;
unsigned long next_index; /* Index of next search */
@@ -689,7 +738,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
static inline void radix_tree_shrink(struct radix_tree_root *root)
{
/* try to shrink tree height */
- while (root->height > 1 &&
+ while (root->height > 0 &&
root->rnode->count == 1 &&
root->rnode->slots[0]) {
struct radix_tree_node *to_free = root->rnode;
@@ -717,12 +766,8 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
- struct radix_tree_path *orig_pathp;
- struct radix_tree_node *slot;
+ struct radix_tree_node *slot = NULL;
unsigned int height, shift;
- void *ret = NULL;
- char tags[RADIX_TREE_MAX_TAGS];
- int nr_cleared_tags;
int tag;
int offset;
@@ -730,11 +775,17 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
if (index > radix_tree_maxindex(height))
goto out;
+ slot = root->rnode;
+ if (height == 0 && root->rnode) {
+ root_tag_clear_all(root);
+ root->rnode = NULL;
+ goto out;
+ }
+
shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
pathp->node = NULL;
- slot = root->rnode;
- for ( ; height > 0; height--) {
+ do {
if (slot == NULL)
goto out;
@@ -744,44 +795,22 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
pathp->node = slot;
slot = slot->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
- }
+ height--;
+ } while (height > 0);
- ret = slot;
- if (ret == NULL)
+ if (slot == NULL)
goto out;
- orig_pathp = pathp;
-
/*
* Clear all tags associated with the just-deleted item
*/
- nr_cleared_tags = 0;
for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- tags[tag] = 1;
- if (tag_get(pathp->node, tag, pathp->offset)) {
- tag_clear(pathp->node, tag, pathp->offset);
- if (!any_tag_set(pathp->node, tag)) {
- tags[tag] = 0;
- nr_cleared_tags++;
- }
- }
- }
-
- for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (tags[tag])
- continue;
-
- tag_clear(pathp->node, tag, pathp->offset);
- if (any_tag_set(pathp->node, tag)) {
- tags[tag] = 1;
- nr_cleared_tags--;
- }
- }
+ if (tag_get(pathp->node, tag, pathp->offset))
+ radix_tree_tag_clear(root, index, tag);
}
/* Now free the nodes we do not need anymore */
- for (pathp = orig_pathp; pathp->node; pathp--) {
+ while (pathp->node) {
pathp->node->slots[pathp->offset] = NULL;
pathp->node->count--;
@@ -793,11 +822,15 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
/* Node with zero slots in use so free it */
radix_tree_node_free(pathp->node);
+
+ pathp--;
}
- root->rnode = NULL;
+ root_tag_clear_all(root);
root->height = 0;
+ root->rnode = NULL;
+
out:
- return ret;
+ return slot;
}
EXPORT_SYMBOL(radix_tree_delete);
@@ -808,11 +841,7 @@ EXPORT_SYMBOL(radix_tree_delete);
*/
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
- struct radix_tree_node *rnode;
- rnode = root->rnode;
- if (!rnode)
- return 0;
- return any_tag_set(rnode, tag);
+ return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
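Aside, not part of the patch: the root-tag encoding used throughout the hunks above. The exact bit positions depend on __GFP_BITS_SHIFT, so take the layout as illustrative:

/*
 * root->gfp_mask now does double duty:
 *
 *   bits 0 .. __GFP_BITS_SHIFT-1    allocation mask (root_gfp_mask())
 *   bit  __GFP_BITS_SHIFT + tag     "tag set somewhere in this tree"
 *
 * e.g. root_tag_set(root, 0) ORs in 1 << __GFP_BITS_SHIFT, and
 * radix_tree_tagged(root, 0) becomes a single bit test with no node walk.
 */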
diff --git a/lib/string.c b/lib/string.c
index 064f6315b1c..63077267367 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -301,6 +301,36 @@ char *strnchr(const char *s, size_t count, int c)
EXPORT_SYMBOL(strnchr);
#endif
+/**
+ * strstrip - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strstrip(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end != s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ while (*s && isspace(*s))
+ s++;
+
+ return s;
+}
+EXPORT_SYMBOL(strstrip);
+
#ifndef __HAVE_ARCH_STRLEN
/**
* strlen - Find the length of a string
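Aside, not part of the patch: a usage sketch for the new strstrip(). It trims in place, so it needs writable storage, and the returned pointer — not the original buffer start — is the beginning of the trimmed text:

#include <linux/string.h>

static const char *example(void)
{
	static char buf[] = "  hello world \n";

	return strstrip(buf);	/* returns pointer to "hello world";
				 * buf is cut after the last non-space char */
}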
diff --git a/mm/Kconfig b/mm/Kconfig
index 332f5c29b53..66e65ab3942 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -138,8 +138,8 @@ config SPLIT_PTLOCK_CPUS
#
config MIGRATION
bool "Page migration"
- def_bool y if NUMA
- depends on SWAP && NUMA
+ def_bool y
+ depends on NUMA
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful for
diff --git a/mm/filemap.c b/mm/filemap.c
index fd57442186c..807a463fd5e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
+#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
@@ -38,7 +39,6 @@
*/
#include <linux/buffer_head.h> /* for generic_osync_inode */
-#include <asm/uaccess.h>
#include <asm/mman.h>
static ssize_t
@@ -171,15 +171,17 @@ static int sync_page(void *word)
}
/**
- * filemap_fdatawrite_range - start writeback against all of a mapping's
- * dirty pages that lie within the byte offsets <start, end>
+ * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
* @start: offset in bytes where the range starts
* @end: offset in bytes where the range ends (inclusive)
* @sync_mode: enable synchronous operation
*
+ * Start writeback against all of a mapping's dirty pages that lie
+ * within the byte offsets <start, end> inclusive.
+ *
* If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory * cleansing writeback. The difference between
+ * opposed to a regular memory cleansing writeback. The difference between
* these two operations is that if a dirty page/buffer is encountered, it must
* be waited upon, and not just skipped over.
*/
@@ -190,8 +192,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
struct writeback_control wbc = {
.sync_mode = sync_mode,
.nr_to_write = mapping->nrpages * 2,
- .start = start,
- .end = end,
+ .range_start = start,
+ .range_end = end,
};
if (!mapping_cap_writeback_dirty(mapping))
@@ -204,7 +206,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
static inline int __filemap_fdatawrite(struct address_space *mapping,
int sync_mode)
{
- return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+ return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
int filemap_fdatawrite(struct address_space *mapping)
@@ -219,7 +221,10 @@ static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
-/*
+/**
+ * filemap_flush - mostly a non-blocking flush
+ * @mapping: target address_space
+ *
* This is a mostly non-blocking flush. Not suitable for data-integrity
* purposes - I/O may not be started against all dirty pages.
*/
@@ -229,7 +234,12 @@ int filemap_flush(struct address_space *mapping)
}
EXPORT_SYMBOL(filemap_flush);
-/*
+/**
+ * wait_on_page_writeback_range - wait for writeback to complete
+ * @mapping: target address_space
+ * @start: beginning page index
+ * @end: ending page index
+ *
* Wait for writeback to complete against pages indexed by start->end
* inclusive
*/
@@ -276,7 +286,13 @@ int wait_on_page_writeback_range(struct address_space *mapping,
return ret;
}
-/*
+/**
+ * sync_page_range - write and wait on all pages in the passed range
+ * @inode: target inode
+ * @mapping: target address_space
+ * @pos: beginning offset in pages to write
+ * @count: number of bytes to write
+ *
* Write and wait upon all the pages in the passed range. This is a "data
* integrity" operation. It waits upon in-flight writeout before starting and
* waiting upon new writeout. If there was an IO error, return it.
@@ -305,7 +321,13 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
}
EXPORT_SYMBOL(sync_page_range);
-/*
+/**
+ * sync_page_range_nolock
+ * @inode: target inode
+ * @mapping: target address_space
+ * @pos: beginning offset in pages to write
+ * @count: number of bytes to write
+ *
* Note: Holding i_mutex across sync_page_range_nolock is not a good idea
* as it forces O_SYNC writers to different parts of the same file
* to be serialised right until io completion.
@@ -329,10 +351,11 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range_nolock);
/**
- * filemap_fdatawait - walk the list of under-writeback pages of the given
- * address space and wait for all of them.
- *
+ * filemap_fdatawait - wait for all under-writeback pages to complete
* @mapping: address space structure to wait for
+ *
+ * Walk the list of under-writeback pages of the given address space
+ * and wait for all of them.
*/
int filemap_fdatawait(struct address_space *mapping)
{
@@ -368,7 +391,12 @@ int filemap_write_and_wait(struct address_space *mapping)
}
EXPORT_SYMBOL(filemap_write_and_wait);
-/*
+/**
+ * filemap_write_and_wait_range - write out & wait on a file range
+ * @mapping: the address_space for the pages
+ * @lstart: offset in bytes where the range starts
+ * @lend: offset in bytes where the range ends (inclusive)
+ *
* Write out and wait upon file offsets lstart->lend, inclusive.
*
* Note that `lend' is inclusive (describes the last byte to be written) so
@@ -394,8 +422,14 @@ int filemap_write_and_wait_range(struct address_space *mapping,
return err;
}
-/*
- * This function is used to add newly allocated pagecache pages:
+/**
+ * add_to_page_cache - add newly allocated pagecache pages
+ * @page: page to add
+ * @mapping: the page's address_space
+ * @offset: page index
+ * @gfp_mask: page allocation mode
+ *
+ * This function is used to add newly allocated pagecache pages;
* the page is new, so we can just run SetPageLocked() against it.
* The other page state flags were set by rmqueue().
*
@@ -422,7 +456,6 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
}
return error;
}
-
EXPORT_SYMBOL(add_to_page_cache);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -489,8 +522,7 @@ void fastcall wait_on_page_bit(struct page *page, int bit_nr)
EXPORT_SYMBOL(wait_on_page_bit);
/**
- * unlock_page() - unlock a locked page
- *
+ * unlock_page - unlock a locked page
* @page: the page
*
* Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
@@ -513,8 +545,9 @@ void fastcall unlock_page(struct page *page)
}
EXPORT_SYMBOL(unlock_page);
-/*
- * End writeback against a page.
+/**
+ * end_page_writeback - end writeback against a page
+ * @page: the page
*/
void end_page_writeback(struct page *page)
{
@@ -527,10 +560,11 @@ void end_page_writeback(struct page *page)
}
EXPORT_SYMBOL(end_page_writeback);
-/*
- * Get a lock on the page, assuming we need to sleep to get it.
+/**
+ * __lock_page - get a lock on the page, assuming we need to sleep to get it
+ * @page: the page to lock
*
- * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
+ * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
* random driver's requestfn sets TASK_RUNNING, we could busywait. However
* chances are that on the second loop, the block layer's plug list is empty,
* so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
@@ -544,8 +578,12 @@ void fastcall __lock_page(struct page *page)
}
EXPORT_SYMBOL(__lock_page);
-/*
- * a rather lightweight function, finding and getting a reference to a
+/**
+ * find_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * A rather lightweight function, finding and getting a reference to a
* hashed page atomically.
*/
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
@@ -559,11 +597,14 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
read_unlock_irq(&mapping->tree_lock);
return page;
}
-
EXPORT_SYMBOL(find_get_page);
-/*
- * Same as above, but trylock it instead of incrementing the count.
+/**
+ * find_trylock_page - find and lock a page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Same as find_get_page(), but trylock it instead of incrementing the count.
*/
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
@@ -576,12 +617,10 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs
read_unlock_irq(&mapping->tree_lock);
return page;
}
-
EXPORT_SYMBOL(find_trylock_page);
/**
* find_lock_page - locate, pin and lock a pagecache page
- *
* @mapping: the address_space to search
* @offset: the page index
*
@@ -617,12 +656,10 @@ repeat:
read_unlock_irq(&mapping->tree_lock);
return page;
}
-
EXPORT_SYMBOL(find_lock_page);
/**
* find_or_create_page - locate or add a pagecache page
- *
* @mapping: the page's address_space
* @index: the page's index into the mapping
* @gfp_mask: page allocation mode
@@ -663,7 +700,6 @@ repeat:
page_cache_release(cached_page);
return page;
}
-
EXPORT_SYMBOL(find_or_create_page);
/**
@@ -729,9 +765,16 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
return i;
}
-/*
+/**
+ * find_get_pages_tag - find and return pages that match @tag
+ * @mapping: the address_space to search
+ * @index: the starting page index
+ * @tag: the tag index
+ * @nr_pages: the maximum number of pages
+ * @pages: where the resulting pages are placed
+ *
* Like find_get_pages, except we only return pages which are tagged with
- * `tag'. We update *index to index the next page for the traversal.
+ * @tag. We update @index to index the next page for the traversal.
*/
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages)
@@ -750,7 +793,11 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
return ret;
}
-/*
+/**
+ * grab_cache_page_nowait - returns locked page at given index in given cache
+ * @mapping: target address_space
+ * @index: the page index
+ *
* Same as grab_cache_page, but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
@@ -779,19 +826,25 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
}
return page;
}
-
EXPORT_SYMBOL(grab_cache_page_nowait);
-/*
+/**
+ * do_generic_mapping_read - generic file read routine
+ * @mapping: address_space to be read
+ * @_ra: file's readahead state
+ * @filp: the file to read
+ * @ppos: current file position
+ * @desc: read_descriptor
+ * @actor: read method
+ *
* This is a generic file read routine, and uses the
- * mapping->a_ops->readpage() function for the actual low-level
- * stuff.
+ * mapping->a_ops->readpage() function for the actual low-level stuff.
*
* This is really ugly. But the goto's actually try to clarify some
* of the logic when it comes to error handling etc.
*
- * Note the struct file* is only passed for the use of readpage. It may be
- * NULL.
+ * Note the struct file* is only passed for the use of readpage.
+ * It may be NULL.
*/
void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *_ra,
@@ -1004,7 +1057,6 @@ out:
if (filp)
file_accessed(filp);
}
-
EXPORT_SYMBOL(do_generic_mapping_read);
int file_read_actor(read_descriptor_t *desc, struct page *page,
@@ -1045,7 +1097,13 @@ success:
return size;
}
-/*
+/**
+ * __generic_file_aio_read - generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iov: io vector request
+ * @nr_segs: number of segments in the iovec
+ * @ppos: current file position
+ *
* This is the "read()" routine for all filesystems
* that can use the page cache directly.
*/
@@ -1124,7 +1182,6 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
out:
return retval;
}
-
EXPORT_SYMBOL(__generic_file_aio_read);
ssize_t
@@ -1135,7 +1192,6 @@ generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
BUG_ON(iocb->ki_pos != pos);
return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
-
EXPORT_SYMBOL(generic_file_aio_read);
ssize_t
@@ -1151,7 +1207,6 @@ generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppo
ret = wait_on_sync_kiocb(&kiocb);
return ret;
}
-
EXPORT_SYMBOL(generic_file_read);
int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
@@ -1192,7 +1247,6 @@ ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
return desc.written;
return desc.error;
}
-
EXPORT_SYMBOL(generic_file_sendfile);
static ssize_t
@@ -1228,11 +1282,15 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
}
#ifdef CONFIG_MMU
-/*
+static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
+/**
+ * page_cache_read - adds requested page to the page cache if not already there
+ * @file: file to read
+ * @offset: page index
+ *
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
-static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
struct address_space *mapping = file->f_mapping;
@@ -1259,7 +1317,12 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
#define MMAP_LOTSAMISS (100)
-/*
+/**
+ * filemap_nopage - read in file data for page fault handling
+ * @area: the applicable vm_area
+ * @address: target address to read in
+ * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
+ *
* filemap_nopage() is invoked via the vma operations vector for a
* mapped memory region to read in file data during a page fault.
*
@@ -1462,7 +1525,6 @@ page_not_uptodate:
page_cache_release(page);
return NULL;
}
-
EXPORT_SYMBOL(filemap_nopage);
static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
@@ -1716,7 +1778,13 @@ repeat:
return page;
}
-/*
+/**
+ * read_cache_page - read into page cache, fill it if needed
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @filler: function to perform the read
+ * @data: destination for read data
+ *
* Read into the page cache. If a page already exists,
* and PageUptodate() is not set, try to fill the page.
*/
@@ -1754,7 +1822,6 @@ retry:
out:
return page;
}
-
EXPORT_SYMBOL(read_cache_page);
/*
@@ -1835,7 +1902,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
int copy = min(bytes, iov->iov_len - base);
base = 0;
- left = __copy_from_user_inatomic(vaddr, buf, copy);
+ left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
@@ -1854,7 +1921,7 @@ __filemap_copy_from_user_iovec(char *vaddr,
/*
* Performs necessary checks before doing a write
*
- * Can adjust writing position aor amount of bytes to write.
+ * Can adjust writing position or amount of bytes to write.
* Returns appropriate error code that caller should return or
* zero in case that write should be allowed.
*/
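
For reference, the comment blocks converted above follow the kernel-doc layout consumed by scripts/kernel-doc and the DocBook templates. A purely hypothetical function, shown only to illustrate the expected shape:

/**
 * example_find_page - short one-line summary of the function
 * @mapping: description of the first parameter
 * @offset: description of the second parameter
 *
 * Longer free-form description. %NULL marks a constant, &struct foo marks
 * a structure reference, and other_function() marks a cross-reference in
 * the generated documentation. (example_find_page does not exist; it only
 * shows the layout.)
 */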
diff --git a/mm/filemap.h b/mm/filemap.h
index 13793ba0ce1..5683cde2205 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -13,7 +13,7 @@
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/config.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
size_t
__filemap_copy_from_user_iovec(char *vaddr,
@@ -34,13 +34,13 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
int left;
kaddr = kmap_atomic(page, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+ left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
if (left != 0) {
/* Do it the slow way */
kaddr = kmap(page);
- left = __copy_from_user(kaddr + offset, buf, bytes);
+ left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
kunmap(page);
}
return bytes - left;
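
The _nocache copy variants used above exist because write() data copied into the page cache is usually not read back through the kernel mapping right away, so non-temporal stores can avoid evicting hot cache lines. A rough sketch of how an architecture without such stores could keep this code building; the ARCH_HAS_NOCACHE_UACCESS guard name is an assumption, not something taken from this patch:

/* Sketch only: fall back to the ordinary user-copy helpers when the
 * architecture provides no cache-bypassing implementation. */
#ifndef ARCH_HAS_NOCACHE_UACCESS
#define __copy_from_user_inatomic_nocache(dst, src, n) \
        __copy_from_user_inatomic((dst), (src), (n))
#define __copy_from_user_nocache(dst, src, n) \
        __copy_from_user((dst), (src), (n))
#endif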
diff --git a/mm/fremap.c b/mm/fremap.c
index 9f381e58bf4..21b7d0cbc98 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -83,6 +83,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_add_file_rmap(page);
pte_val = *pte;
update_mmu_cache(vma, addr, pte_val);
+ lazy_mmu_prot_update(pte_val);
err = 0;
unlock:
pte_unmap_unlock(pte, ptl);
@@ -114,7 +115,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
pte_val = *pte;
- update_mmu_cache(vma, addr, pte_val);
+ /*
+ * We don't need to run update_mmu_cache() here because the "file pte"
+ * being installed by install_file_pte() is not a real pte - it's a
+ * non-present entry (like a swap entry), noting what file offset should
+ * be mapped there when there's a fault (in a non-linear vma where
+ * that's not obvious).
+ */
pte_unmap_unlock(pte, ptl);
err = 0;
out:
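
The comment above describes the non-present "file pte" used by non-linear mappings. From user space such ptes are created through remap_file_pages(2); a minimal sketch, with error handling trimmed and a 4 KiB page size assumed:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        size_t pgsz = 4096;
        FILE *f = tmpfile();
        int fd = fileno(f);

        if (ftruncate(fd, 4 * pgsz) != 0)
                return 1;

        char *p = mmap(NULL, 4 * pgsz, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /* Back the first window of the mapping with file page 3 instead of
         * page 0; this is what ends up installing the file ptes handled by
         * install_file_pte() above. */
        if (remap_file_pages(p, pgsz, 0, 3, 0) != 0)
                perror("remap_file_pages");

        munmap(p, 4 * pgsz);
        return 0;
}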
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 832f676ca03..df499973255 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -22,7 +22,7 @@
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages;
+static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
@@ -123,39 +123,13 @@ static int alloc_fresh_huge_page(void)
static struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr)
{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct page *page;
- int use_reserve = 0;
- unsigned long idx;
spin_lock(&hugetlb_lock);
-
- if (vma->vm_flags & VM_MAYSHARE) {
-
- /* idx = radix tree index, i.e. offset into file in
- * HPAGE_SIZE units */
- idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
- + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-
- /* The hugetlbfs specific inode info stores the number
- * of "guaranteed available" (huge) pages. That is,
- * the first 'prereserved_hpages' pages of the inode
- * are either already instantiated, or have been
- * pre-reserved (by hugetlb_reserve_for_inode()). Here
- * we're in the process of instantiating the page, so
- * we use this to determine whether to draw from the
- * pre-reserved pool or the truly free pool. */
- if (idx < HUGETLBFS_I(inode)->prereserved_hpages)
- use_reserve = 1;
- }
-
- if (!use_reserve) {
- if (free_huge_pages <= reserved_huge_pages)
- goto fail;
- } else {
- BUG_ON(reserved_huge_pages == 0);
- reserved_huge_pages--;
- }
+ if (vma->vm_flags & VM_MAYSHARE)
+ resv_huge_pages--;
+ else if (free_huge_pages <= resv_huge_pages)
+ goto fail;
page = dequeue_huge_page(vma, addr);
if (!page)
@@ -165,96 +139,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
set_page_refcounted(page);
return page;
- fail:
- WARN_ON(use_reserve); /* reserved allocations shouldn't fail */
+fail:
spin_unlock(&hugetlb_lock);
return NULL;
}
-/* hugetlb_extend_reservation()
- *
- * Ensure that at least 'atleast' hugepages are, and will remain,
- * available to instantiate the first 'atleast' pages of the given
- * inode. If the inode doesn't already have this many pages reserved
- * or instantiated, set aside some hugepages in the reserved pool to
- * satisfy later faults (or fail now if there aren't enough, rather
- * than getting the SIGBUS later).
- */
-int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atleast)
-{
- struct inode *inode = &info->vfs_inode;
- unsigned long change_in_reserve = 0;
- int ret = 0;
-
- spin_lock(&hugetlb_lock);
- read_lock_irq(&inode->i_mapping->tree_lock);
-
- if (info->prereserved_hpages >= atleast)
- goto out;
-
- /* Because we always call this on shared mappings, none of the
- * pages beyond info->prereserved_hpages can have been
- * instantiated, so we need to reserve all of them now. */
- change_in_reserve = atleast - info->prereserved_hpages;
-
- if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) {
- ret = -ENOMEM;
- goto out;
- }
-
- reserved_huge_pages += change_in_reserve;
- info->prereserved_hpages = atleast;
-
- out:
- read_unlock_irq(&inode->i_mapping->tree_lock);
- spin_unlock(&hugetlb_lock);
-
- return ret;
-}
-
-/* hugetlb_truncate_reservation()
- *
- * This returns pages reserved for the given inode to the general free
- * hugepage pool. If the inode has any pages prereserved, but not
- * instantiated, beyond offset (atmost << HPAGE_SIZE), then release
- * them.
- */
-void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
- unsigned long atmost)
-{
- struct inode *inode = &info->vfs_inode;
- struct address_space *mapping = inode->i_mapping;
- unsigned long idx;
- unsigned long change_in_reserve = 0;
- struct page *page;
-
- spin_lock(&hugetlb_lock);
- read_lock_irq(&inode->i_mapping->tree_lock);
-
- if (info->prereserved_hpages <= atmost)
- goto out;
-
- /* Count pages which were reserved, but not instantiated, and
- * which we can now release. */
- for (idx = atmost; idx < info->prereserved_hpages; idx++) {
- page = radix_tree_lookup(&mapping->page_tree, idx);
- if (!page)
- /* Pages which are already instantiated can't
- * be unreserved (and in fact have already
- * been removed from the reserved pool) */
- change_in_reserve++;
- }
-
- BUG_ON(reserved_huge_pages < change_in_reserve);
- reserved_huge_pages -= change_in_reserve;
- info->prereserved_hpages = atmost;
-
- out:
- read_unlock_irq(&inode->i_mapping->tree_lock);
- spin_unlock(&hugetlb_lock);
-}
-
static int __init hugetlb_init(void)
{
unsigned long i;
@@ -334,7 +223,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
return nr_huge_pages;
spin_lock(&hugetlb_lock);
- count = max(count, reserved_huge_pages);
+ count = max(count, resv_huge_pages);
try_to_free_low(count);
while (count < nr_huge_pages) {
struct page *page = dequeue_huge_page(NULL, 0);
@@ -361,11 +250,11 @@ int hugetlb_report_meminfo(char *buf)
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
- "HugePages_Rsvd: %5lu\n"
+ "HugePages_Rsvd: %5lu\n"
"Hugepagesize: %5lu kB\n",
nr_huge_pages,
free_huge_pages,
- reserved_huge_pages,
+ resv_huge_pages,
HPAGE_SIZE/1024);
}
@@ -754,3 +643,156 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
flush_tlb_range(vma, start, end);
}
+struct file_region {
+ struct list_head link;
+ long from;
+ long to;
+};
+
+static long region_add(struct list_head *head, long f, long t)
+{
+ struct file_region *rg, *nrg, *trg;
+
+ /* Locate the region we are either in or before. */
+ list_for_each_entry(rg, head, link)
+ if (f <= rg->to)
+ break;
+
+ /* Round our left edge to the current segment if it encloses us. */
+ if (f > rg->from)
+ f = rg->from;
+
+ /* Check for and consume any regions we now overlap with. */
+ nrg = rg;
+ list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ if (rg->from > t)
+ break;
+
+ /* If this area reaches higher then extend our area to
+ * include it completely. If this is not the first area
+ * which we intend to reuse, free it. */
+ if (rg->to > t)
+ t = rg->to;
+ if (rg != nrg) {
+ list_del(&rg->link);
+ kfree(rg);
+ }
+ }
+ nrg->from = f;
+ nrg->to = t;
+ return 0;
+}
+
+static long region_chg(struct list_head *head, long f, long t)
+{
+ struct file_region *rg, *nrg;
+ long chg = 0;
+
+ /* Locate the region we are before or in. */
+ list_for_each_entry(rg, head, link)
+ if (f <= rg->to)
+ break;
+
+ /* If we are below the current region then a new region is required.
+ * Subtle, allocate a new region at the position but make it zero
+ * size such that we can guarantee to record the reservation. */
+ if (&rg->link == head || t < rg->from) {
+ nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+ if (nrg == 0)
+ return -ENOMEM;
+ nrg->from = f;
+ nrg->to = f;
+ INIT_LIST_HEAD(&nrg->link);
+ list_add(&nrg->link, rg->link.prev);
+
+ return t - f;
+ }
+
+ /* Round our left edge to the current segment if it encloses us. */
+ if (f > rg->from)
+ f = rg->from;
+ chg = t - f;
+
+ /* Check for and consume any regions we now overlap with. */
+ list_for_each_entry(rg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ if (rg->from > t)
+ return chg;
+
+ /* We overlap with this area; if it extends further than
+ * us then we must extend ourselves. Account for its
+ * existing reservation. */
+ if (rg->to > t) {
+ chg += rg->to - t;
+ t = rg->to;
+ }
+ chg -= rg->to - rg->from;
+ }
+ return chg;
+}
+
+static long region_truncate(struct list_head *head, long end)
+{
+ struct file_region *rg, *trg;
+ long chg = 0;
+
+ /* Locate the region we are either in or before. */
+ list_for_each_entry(rg, head, link)
+ if (end <= rg->to)
+ break;
+ if (&rg->link == head)
+ return 0;
+
+ /* If we are in the middle of a region then adjust it. */
+ if (end > rg->from) {
+ chg = rg->to - end;
+ rg->to = end;
+ rg = list_entry(rg->link.next, typeof(*rg), link);
+ }
+
+ /* Drop any remaining regions. */
+ list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ chg += rg->to - rg->from;
+ list_del(&rg->link);
+ kfree(rg);
+ }
+ return chg;
+}
+
+static int hugetlb_acct_memory(long delta)
+{
+ int ret = -ENOMEM;
+
+ spin_lock(&hugetlb_lock);
+ if ((delta + resv_huge_pages) <= free_huge_pages) {
+ resv_huge_pages += delta;
+ ret = 0;
+ }
+ spin_unlock(&hugetlb_lock);
+ return ret;
+}
+
+int hugetlb_reserve_pages(struct inode *inode, long from, long to)
+{
+ long ret, chg;
+
+ chg = region_chg(&inode->i_mapping->private_list, from, to);
+ if (chg < 0)
+ return chg;
+ ret = hugetlb_acct_memory(chg);
+ if (ret < 0)
+ return ret;
+ region_add(&inode->i_mapping->private_list, from, to);
+ return 0;
+}
+
+void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+{
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
+ hugetlb_acct_memory(freed - chg);
+}
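
The file_region list above is a sorted set of disjoint [from, to) intervals recording which huge-page offsets of the file already carry a reservation. A worked example of the accounting, with illustrative values only:

/*
 * Suppose the inode's reserve map already holds [0, 3).  Reserving pages
 * for offsets [2, 5) then goes:
 *
 *   chg = region_chg(&mapping->private_list, 2, 5);   // 2: only 3 and 4 are new
 *   hugetlb_acct_memory(chg);                         // take 2 pages from the free pool
 *   region_add(&mapping->private_list, 2, 5);         // map becomes [0, 5)
 *
 * A later truncation to offset 1 calls region_truncate(..., 1), which trims
 * the map to [0, 1) and returns 4, so hugetlb_unreserve_pages() can hand
 * back whatever part of those reservations was never instantiated.
 */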
diff --git a/mm/memory.c b/mm/memory.c
index 0ec7bc64427..247b5c312b9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -434,7 +434,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
/* pte contains position in swap or file, so copy. */
if (unlikely(!pte_present(pte))) {
if (!pte_file(pte)) {
- swap_duplicate(pte_to_swp_entry(pte));
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ swap_duplicate(entry);
/* make sure dst_mm is on swapoff's mmlist. */
if (unlikely(list_empty(&dst_mm->mmlist))) {
spin_lock(&mmlist_lock);
@@ -443,6 +445,16 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
&src_mm->mmlist);
spin_unlock(&mmlist_lock);
}
+ if (is_write_migration_entry(entry) &&
+ is_cow_mapping(vm_flags)) {
+ /*
+ * COW mappings require pages in both parent
+ * and child to be set to read.
+ */
+ make_migration_entry_read(&entry);
+ pte = swp_entry_to_pte(entry);
+ set_pte_at(src_mm, addr, src_pte, pte);
+ }
}
goto out_set_pte;
}
@@ -1445,25 +1457,60 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *old_page, *new_page;
pte_t entry;
- int ret = VM_FAULT_MINOR;
+ int reuse, ret = VM_FAULT_MINOR;
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page)
goto gotten;
- if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
- int reuse = can_share_swap_page(old_page);
- unlock_page(old_page);
- if (reuse) {
- flush_cache_page(vma, address, pte_pfn(orig_pte));
- entry = pte_mkyoung(orig_pte);
- entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- ptep_set_access_flags(vma, address, page_table, entry, 1);
- update_mmu_cache(vma, address, entry);
- lazy_mmu_prot_update(entry);
- ret |= VM_FAULT_WRITE;
- goto unlock;
+ if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
+ (VM_SHARED|VM_WRITE))) {
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+ /*
+ * Notify the address space that the page is about to
+ * become writable so that it can prohibit this or wait
+ * for the page to get into an appropriate state.
+ *
+ * We do this without the lock held, so that it can
+ * sleep if it needs to.
+ */
+ page_cache_get(old_page);
+ pte_unmap_unlock(page_table, ptl);
+
+ if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+ goto unwritable_page;
+
+ page_cache_release(old_page);
+
+ /*
+ * Since we dropped the lock we need to revalidate
+ * the PTE as someone else may have changed it. If
+ * they did, we just return, as we can count on the
+ * MMU to tell us if they didn't also make it writable.
+ */
+ page_table = pte_offset_map_lock(mm, pmd, address,
+ &ptl);
+ if (!pte_same(*page_table, orig_pte))
+ goto unlock;
}
+
+ reuse = 1;
+ } else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
+ reuse = can_share_swap_page(old_page);
+ unlock_page(old_page);
+ } else {
+ reuse = 0;
+ }
+
+ if (reuse) {
+ flush_cache_page(vma, address, pte_pfn(orig_pte));
+ entry = pte_mkyoung(orig_pte);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ ptep_set_access_flags(vma, address, page_table, entry, 1);
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+ ret |= VM_FAULT_WRITE;
+ goto unlock;
}
/*
@@ -1523,6 +1570,10 @@ oom:
if (old_page)
page_cache_release(old_page);
return VM_FAULT_OOM;
+
+unwritable_page:
+ page_cache_release(old_page);
+ return VM_FAULT_SIGBUS;
}
/*
@@ -1879,7 +1930,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
entry = pte_to_swp_entry(orig_pte);
-again:
+ if (is_migration_entry(entry)) {
+ migration_entry_wait(mm, pmd, address);
+ goto out;
+ }
page = lookup_swap_cache(entry);
if (!page) {
swapin_readahead(entry, address, vma);
@@ -1903,12 +1957,6 @@ again:
mark_page_accessed(page);
lock_page(page);
- if (!PageSwapCache(page)) {
- /* Page migration has occured */
- unlock_page(page);
- page_cache_release(page);
- goto again;
- }
/*
* Back out if somebody else already faulted in this pte.
@@ -2074,18 +2122,31 @@ retry:
/*
* Should we do an early C-O-W break?
*/
- if (write_access && !(vma->vm_flags & VM_SHARED)) {
- struct page *page;
+ if (write_access) {
+ if (!(vma->vm_flags & VM_SHARED)) {
+ struct page *page;
- if (unlikely(anon_vma_prepare(vma)))
- goto oom;
- page = alloc_page_vma(GFP_HIGHUSER, vma, address);
- if (!page)
- goto oom;
- copy_user_highpage(page, new_page, address);
- page_cache_release(new_page);
- new_page = page;
- anon = 1;
+ if (unlikely(anon_vma_prepare(vma)))
+ goto oom;
+ page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ if (!page)
+ goto oom;
+ copy_user_highpage(page, new_page, address);
+ page_cache_release(new_page);
+ new_page = page;
+ anon = 1;
+
+ } else {
+ /* if the page will be shareable, see if the backing
+ * address space wants to know that the page is about
+ * to become writable */
+ if (vma->vm_ops->page_mkwrite &&
+ vma->vm_ops->page_mkwrite(vma, new_page) < 0
+ ) {
+ page_cache_release(new_page);
+ return VM_FAULT_SIGBUS;
+ }
+ }
}
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
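
The do_wp_page()/do_no_page() changes above only matter to mappings whose backer registers the new hook. A sketch of such a registration; only the hook signature comes from this series, while example_reserve_space() and the vm_ops instance are hypothetical:

/* Sketch: a filesystem that wants to see the first write to each shared,
 * writable page provides ->page_mkwrite; a negative return makes the
 * faulting task receive SIGBUS. */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        /* hypothetical helper, e.g. reserve blocks for delayed allocation */
        if (!example_reserve_space(vma->vm_file, page->index))
                return -ENOSPC;
        return 0;
}

static struct vm_operations_struct example_vm_ops = {
        .nopage       = filemap_nopage,
        .page_mkwrite = example_page_mkwrite,
};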
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70df5c0d957..841a077d5ae 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,7 +26,7 @@
extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
unsigned long size);
-static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
struct pglist_data *pgdat = zone->zone_pgdat;
int nr_pages = PAGES_PER_SECTION;
@@ -34,8 +34,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
int zone_type;
zone_type = zone - pgdat->node_zones;
+ if (!populated_zone(zone)) {
+ int ret = 0;
+ ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+ if (ret < 0)
+ return ret;
+ }
memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
+ return 0;
}
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
@@ -50,7 +57,11 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
if (ret < 0)
return ret;
- __add_zone(zone, phys_start_pfn);
+ ret = __add_zone(zone, phys_start_pfn);
+
+ if (ret < 0)
+ return ret;
+
return register_new_memory(__pfn_to_section(phys_start_pfn));
}
@@ -116,6 +127,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
unsigned long flags;
unsigned long onlined_pages = 0;
struct zone *zone;
+ int need_zonelists_rebuild = 0;
/*
* This doesn't need a lock to do pfn_to_page().
@@ -128,6 +140,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
pgdat_resize_unlock(zone->zone_pgdat, &flags);
+ /*
+ * If this zone is not populated, then it is not in the zonelist.
+ * This means the page allocator ignores this zone.
+ * So, zonelist must be updated after online.
+ */
+ if (!populated_zone(zone))
+ need_zonelists_rebuild = 1;
+
for (i = 0; i < nr_pages; i++) {
struct page *page = pfn_to_page(pfn + i);
online_page(page);
@@ -138,5 +158,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
setup_per_zone_pages_min();
+ if (need_zonelists_rebuild)
+ build_all_zonelists();
+ vm_total_pages = nr_free_pagecache_pages();
return 0;
}
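
For context, online_pages() is normally reached when user space onlines a memory section through sysfs. A tiny sketch of that trigger; the section number and path layout are assumptions about the memory-hotplug ABI, not part of this patch:

#include <stdio.h>

int main(void)
{
        /* Writing "online" here ends up in online_pages() for that section. */
        FILE *f = fopen("/sys/devices/system/memory/memory8/state", "w");
        if (!f)
                return 1;
        fputs("online", f);
        return fclose(f) ? 1 : 0;
}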
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8778f58880c..ec4a1a950df 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -87,6 +87,8 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
+#include <linux/rmap.h>
+#include <linux/security.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -587,6 +589,11 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
isolate_lru_page(page, pagelist);
}
+static struct page *new_node_page(struct page *page, unsigned long node, int **x)
+{
+ return alloc_pages_node(node, GFP_HIGHUSER, 0);
+}
+
/*
* Migrate pages from one node to a target node.
* Returns error or the number of pages not migrated.
@@ -603,11 +610,9 @@ int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
- if (!list_empty(&pagelist)) {
- err = migrate_pages_to(&pagelist, NULL, dest);
- if (!list_empty(&pagelist))
- putback_lru_pages(&pagelist);
- }
+ if (!list_empty(&pagelist))
+ err = migrate_pages(&pagelist, new_node_page, dest);
+
return err;
}
@@ -694,6 +699,12 @@ int do_migrate_pages(struct mm_struct *mm,
}
+static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+{
+ struct vm_area_struct *vma = (struct vm_area_struct *)private;
+
+ return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
+}
#else
static void migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -706,6 +717,11 @@ int do_migrate_pages(struct mm_struct *mm,
{
return -ENOSYS;
}
+
+static struct page *new_vma_page(struct page *page, unsigned long private)
+{
+ return NULL;
+}
#endif
long do_mbind(unsigned long start, unsigned long len,
@@ -767,15 +783,13 @@ long do_mbind(unsigned long start, unsigned long len,
err = mbind_range(vma, start, end, new);
if (!list_empty(&pagelist))
- nr_failed = migrate_pages_to(&pagelist, vma, -1);
+ nr_failed = migrate_pages(&pagelist, new_vma_page,
+ (unsigned long)vma);
if (!err && nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
}
- if (!list_empty(&pagelist))
- putback_lru_pages(&pagelist);
-
up_write(&mm->mmap_sem);
mpol_free(new);
return err;
@@ -929,6 +943,10 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
goto out;
}
+ err = security_task_movememory(task);
+ if (err)
+ goto out;
+
err = do_migrate_pages(mm, &old, &new,
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
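
Both helpers above follow the new allocation-callback convention: migrate_pages() asks the caller for a destination page for each isolated page and puts every page, moved or not, back on the LRU itself. A minimal illustrative caller, not part of the patch:

/* Sketch: move an already-isolated list of pages to one fixed node. */
static struct page *alloc_on_node(struct page *page, unsigned long private,
                                  int **result)
{
        /* 'private' carries the target node id chosen by the caller */
        return alloc_pages_node((int)private, GFP_HIGHUSER, 0);
}

static int move_list_to_node(struct list_head *pagelist, int nid)
{
        /* number of pages that could not be migrated, or a negative errno */
        return migrate_pages(pagelist, alloc_on_node, (unsigned long)nid);
}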
diff --git a/mm/migrate.c b/mm/migrate.c
index 1c25040693d..1c2a71aa05c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -15,6 +15,7 @@
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
@@ -23,13 +24,13 @@
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
-#include <linux/swapops.h>
+#include <linux/writeback.h>
+#include <linux/mempolicy.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
#include "internal.h"
-/* The maximum number of pages to take off the LRU for migration */
-#define MIGRATE_CHUNK_SIZE 256
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
/*
@@ -64,16 +65,11 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
}
/*
- * migrate_prep() needs to be called after we have compiled the list of pages
- * to be migrated using isolate_lru_page() but before we begin a series of calls
- * to migrate_pages().
+ * migrate_prep() needs to be called before we start compiling a list of pages
+ * to be migrated using isolate_lru_page().
*/
int migrate_prep(void)
{
- /* Must have swap device for migration */
- if (nr_swap_pages <= 0)
- return -ENODEV;
-
/*
* Clear the LRU lists so pages can be isolated.
* Note that pages may be moved off the LRU after we have
@@ -87,7 +83,6 @@ int migrate_prep(void)
static inline void move_to_lru(struct page *page)
{
- list_del(&page->lru);
if (PageActive(page)) {
/*
* lru_cache_add_active checks that
@@ -113,113 +108,200 @@ int putback_lru_pages(struct list_head *l)
int count = 0;
list_for_each_entry_safe(page, page2, l, lru) {
+ list_del(&page->lru);
move_to_lru(page);
count++;
}
return count;
}
-/*
- * Non migratable page
- */
-int fail_migrate_page(struct page *newpage, struct page *page)
+static inline int is_swap_pte(pte_t pte)
{
- return -EIO;
+ return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
}
-EXPORT_SYMBOL(fail_migrate_page);
/*
- * swapout a single page
- * page is locked upon entry, unlocked on exit
+ * Restore a potential migration pte to a working pte entry
*/
-static int swap_page(struct page *page)
+static void remove_migration_pte(struct vm_area_struct *vma,
+ struct page *old, struct page *new)
{
- struct address_space *mapping = page_mapping(page);
+ struct mm_struct *mm = vma->vm_mm;
+ swp_entry_t entry;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ unsigned long addr = page_address_in_vma(new, vma);
+
+ if (addr == -EFAULT)
+ return;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ return;
+
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ return;
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return;
+
+ ptep = pte_offset_map(pmd, addr);
+
+ if (!is_swap_pte(*ptep)) {
+ pte_unmap(ptep);
+ return;
+ }
- if (page_mapped(page) && mapping)
- if (try_to_unmap(page, 1) != SWAP_SUCCESS)
- goto unlock_retry;
+ ptl = pte_lockptr(mm, pmd);
+ spin_lock(ptl);
+ pte = *ptep;
+ if (!is_swap_pte(pte))
+ goto out;
- if (PageDirty(page)) {
- /* Page is dirty, try to write it out here */
- switch(pageout(page, mapping)) {
- case PAGE_KEEP:
- case PAGE_ACTIVATE:
- goto unlock_retry;
+ entry = pte_to_swp_entry(pte);
- case PAGE_SUCCESS:
- goto retry;
+ if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
+ goto out;
- case PAGE_CLEAN:
- ; /* try to free the page below */
- }
- }
+ get_page(new);
+ pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ if (is_write_migration_entry(entry))
+ pte = pte_mkwrite(pte);
+ set_pte_at(mm, addr, ptep, pte);
- if (PagePrivate(page)) {
- if (!try_to_release_page(page, GFP_KERNEL) ||
- (!mapping && page_count(page) == 1))
- goto unlock_retry;
- }
+ if (PageAnon(new))
+ page_add_anon_rmap(new, vma, addr);
+ else
+ page_add_file_rmap(new);
- if (remove_mapping(mapping, page)) {
- /* Success */
- unlock_page(page);
- return 0;
- }
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, pte);
+ lazy_mmu_prot_update(pte);
-unlock_retry:
- unlock_page(page);
+out:
+ pte_unmap_unlock(ptep, ptl);
+}
-retry:
- return -EAGAIN;
+/*
+ * Note that remove_file_migration_ptes will only work on regular mappings;
+ * nonlinear mappings do not use migration entries.
+ */
+static void remove_file_migration_ptes(struct page *old, struct page *new)
+{
+ struct vm_area_struct *vma;
+ struct address_space *mapping = page_mapping(new);
+ struct prio_tree_iter iter;
+ pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ if (!mapping)
+ return;
+
+ spin_lock(&mapping->i_mmap_lock);
+
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
+ remove_migration_pte(vma, old, new);
+
+ spin_unlock(&mapping->i_mmap_lock);
}
/*
- * Remove references for a page and establish the new page with the correct
- * basic settings to be able to stop accesses to the page.
+ * Must hold mmap_sem lock on at least one of the vmas containing
+ * the page so that the anon_vma cannot vanish.
*/
-int migrate_page_remove_references(struct page *newpage,
- struct page *page, int nr_refs)
+static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
- struct address_space *mapping = page_mapping(page);
- struct page **radix_pointer;
+ struct anon_vma *anon_vma;
+ struct vm_area_struct *vma;
+ unsigned long mapping;
- /*
- * Avoid doing any of the following work if the page count
- * indicates that the page is in use or truncate has removed
- * the page.
- */
- if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
- return -EAGAIN;
+ mapping = (unsigned long)new->mapping;
- /*
- * Establish swap ptes for anonymous pages or destroy pte
- * maps for files.
- *
- * In order to reestablish file backed mappings the fault handlers
- * will take the radix tree_lock which may then be used to stop
- * processses from accessing this page until the new page is ready.
- *
- * A process accessing via a swap pte (an anonymous page) will take a
- * page_lock on the old page which will block the process until the
- * migration attempt is complete. At that time the PageSwapCache bit
- * will be examined. If the page was migrated then the PageSwapCache
- * bit will be clear and the operation to retrieve the page will be
- * retried which will find the new page in the radix tree. Then a new
- * direct mapping may be generated based on the radix tree contents.
- *
- * If the page was not migrated then the PageSwapCache bit
- * is still set and the operation may continue.
- */
- if (try_to_unmap(page, 1) == SWAP_FAIL)
- /* A vma has VM_LOCKED set -> permanent failure */
- return -EPERM;
+ if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
+ return;
/*
- * Give up if we were unable to remove all mappings.
+ * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
*/
- if (page_mapcount(page))
- return -EAGAIN;
+ anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
+ spin_lock(&anon_vma->lock);
+
+ list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
+ remove_migration_pte(vma, old, new);
+
+ spin_unlock(&anon_vma->lock);
+}
+
+/*
+ * Get rid of all migration entries and replace them by
+ * references to the indicated page.
+ */
+static void remove_migration_ptes(struct page *old, struct page *new)
+{
+ if (PageAnon(new))
+ remove_anon_migration_ptes(old, new);
+ else
+ remove_file_migration_ptes(old, new);
+}
+
+/*
+ * Something used the pte of a page under migration. We need to
+ * get to the page and wait until migration is finished.
+ * When we return from this function the fault will be retried.
+ *
+ * This function is called from do_swap_page().
+ */
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ swp_entry_t entry;
+ struct page *page;
+
+ ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+ pte = *ptep;
+ if (!is_swap_pte(pte))
+ goto out;
+
+ entry = pte_to_swp_entry(pte);
+ if (!is_migration_entry(entry))
+ goto out;
+
+ page = migration_entry_to_page(entry);
+
+ get_page(page);
+ pte_unmap_unlock(ptep, ptl);
+ wait_on_page_locked(page);
+ put_page(page);
+ return;
+out:
+ pte_unmap_unlock(ptep, ptl);
+}
+
+/*
+ * Replace the page in the mapping.
+ *
+ * The number of remaining references must be:
+ * 1 for anonymous pages without a mapping
+ * 2 for pages with a mapping
+ * 3 for pages with a mapping and PagePrivate set.
+ */
+static int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ struct page **radix_pointer;
+
+ if (!mapping) {
+ /* Anonymous page */
+ if (page_count(page) != 1)
+ return -EAGAIN;
+ return 0;
+ }
write_lock_irq(&mapping->tree_lock);
@@ -227,7 +309,7 @@ int migrate_page_remove_references(struct page *newpage,
&mapping->page_tree,
page_index(page));
- if (!page_mapping(page) || page_count(page) != nr_refs ||
+ if (page_count(page) != 2 + !!PagePrivate(page) ||
*radix_pointer != page) {
write_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
@@ -235,19 +317,14 @@ int migrate_page_remove_references(struct page *newpage,
/*
* Now we know that no one else is looking at the page.
- *
- * Certain minimal information about a page must be available
- * in order for other subsystems to properly handle the page if they
- * find it through the radix tree update before we are finished
- * copying the page.
*/
get_page(newpage);
- newpage->index = page->index;
- newpage->mapping = page->mapping;
+#ifdef CONFIG_SWAP
if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
}
+#endif
*radix_pointer = newpage;
__put_page(page);
@@ -255,12 +332,11 @@ int migrate_page_remove_references(struct page *newpage,
return 0;
}
-EXPORT_SYMBOL(migrate_page_remove_references);
/*
* Copy the page to its new location
*/
-void migrate_page_copy(struct page *newpage, struct page *page)
+static void migrate_page_copy(struct page *newpage, struct page *page)
{
copy_highpage(newpage, page);
@@ -282,7 +358,9 @@ void migrate_page_copy(struct page *newpage, struct page *page)
set_page_dirty(newpage);
}
+#ifdef CONFIG_SWAP
ClearPageSwapCache(page);
+#endif
ClearPageActive(page);
ClearPagePrivate(page);
set_page_private(page, 0);
@@ -295,7 +373,18 @@ void migrate_page_copy(struct page *newpage, struct page *page)
if (PageWriteback(newpage))
end_page_writeback(newpage);
}
-EXPORT_SYMBOL(migrate_page_copy);
+
+/************************************************************
+ * Migration functions
+ ***********************************************************/
+
+/* Always fail migration. Used for mappings that are not movable */
+int fail_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ return -EIO;
+}
+EXPORT_SYMBOL(fail_migrate_page);
/*
* Common logic to directly migrate a single page suitable for
@@ -303,51 +392,286 @@ EXPORT_SYMBOL(migrate_page_copy);
*
* Pages are locked upon entry and exit.
*/
-int migrate_page(struct page *newpage, struct page *page)
+int migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_remove_references(newpage, page, 2);
+ rc = migrate_page_move_mapping(mapping, newpage, page);
+
+ if (rc)
+ return rc;
+
+ migrate_page_copy(newpage, page);
+ return 0;
+}
+EXPORT_SYMBOL(migrate_page);
+
+/*
+ * Migration function for pages with buffers. This function can only be used
+ * if the underlying filesystem guarantees that no other references to "page"
+ * exist.
+ */
+int buffer_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ struct buffer_head *bh, *head;
+ int rc;
+
+ if (!page_has_buffers(page))
+ return migrate_page(mapping, newpage, page);
+
+ head = page_buffers(page);
+
+ rc = migrate_page_move_mapping(mapping, newpage, page);
if (rc)
return rc;
+ bh = head;
+ do {
+ get_bh(bh);
+ lock_buffer(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ put_page(page);
+ get_page(newpage);
+
+ bh = head;
+ do {
+ set_bh_page(bh, newpage, bh_offset(bh));
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ SetPagePrivate(newpage);
+
migrate_page_copy(newpage, page);
+ bh = head;
+ do {
+ unlock_buffer(bh);
+ put_bh(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ return 0;
+}
+EXPORT_SYMBOL(buffer_migrate_page);
+
+/*
+ * Writeback a page to clean the dirty state
+ */
+static int writeout(struct address_space *mapping, struct page *page)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE,
+ .nr_to_write = 1,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ .nonblocking = 1,
+ .for_reclaim = 1
+ };
+ int rc;
+
+ if (!mapping->a_ops->writepage)
+ /* No write method for the address space */
+ return -EINVAL;
+
+ if (!clear_page_dirty_for_io(page))
+ /* Someone else already triggered a write */
+ return -EAGAIN;
+
/*
- * Remove auxiliary swap entries and replace
- * them with real ptes.
- *
- * Note that a real pte entry will allow processes that are not
- * waiting on the page lock to use the new page via the page tables
- * before the new page is unlocked.
+ * A dirty page may imply that the underlying filesystem has
+ * the page on some queue. So the page must be clean for
+ * migration. Writeout may mean we lose the lock and the
+ * page state is no longer what we checked for earlier.
+ * At this point we know that the migration attempt cannot
+ * be successful.
*/
- remove_from_swap(newpage);
- return 0;
+ remove_migration_ptes(page, page);
+
+ rc = mapping->a_ops->writepage(page, &wbc);
+ if (rc < 0)
+ /* I/O Error writing */
+ return -EIO;
+
+ if (rc != AOP_WRITEPAGE_ACTIVATE)
+ /* unlocked. Relock */
+ lock_page(page);
+
+ return -EAGAIN;
+}
+
+/*
+ * Default handling if a filesystem does not provide a migration function.
+ */
+static int fallback_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ if (PageDirty(page))
+ return writeout(mapping, page);
+
+ /*
+ * Buffers may be managed in a filesystem specific way.
+ * We must have no buffers or drop them.
+ */
+ if (page_has_buffers(page) &&
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+
+ return migrate_page(mapping, newpage, page);
+}
+
+/*
+ * Move a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ */
+static int move_to_new_page(struct page *newpage, struct page *page)
+{
+ struct address_space *mapping;
+ int rc;
+
+ /*
+ * Block others from accessing the page when we get around to
+ * establishing additional references. We are the only one
+ * holding a reference to the new page at this point.
+ */
+ if (TestSetPageLocked(newpage))
+ BUG();
+
+ /* Prepare mapping for the new page.*/
+ newpage->index = page->index;
+ newpage->mapping = page->mapping;
+
+ mapping = page_mapping(page);
+ if (!mapping)
+ rc = migrate_page(mapping, newpage, page);
+ else if (mapping->a_ops->migratepage)
+ /*
+ * Most pages have a mapping and most filesystems
+ * should provide a migration function. Anonymous
+ * pages are part of swap space which also has its
+ * own migration function. This is the most common
+ * path for page migration.
+ */
+ rc = mapping->a_ops->migratepage(mapping,
+ newpage, page);
+ else
+ rc = fallback_migrate_page(mapping, newpage, page);
+
+ if (!rc)
+ remove_migration_ptes(page, newpage);
+ else
+ newpage->mapping = NULL;
+
+ unlock_page(newpage);
+
+ return rc;
+}
+
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+ struct page *page, int force)
+{
+ int rc = 0;
+ int *result = NULL;
+ struct page *newpage = get_new_page(page, private, &result);
+
+ if (!newpage)
+ return -ENOMEM;
+
+ if (page_count(page) == 1)
+ /* page was freed from under us. So we are done. */
+ goto move_newpage;
+
+ rc = -EAGAIN;
+ if (TestSetPageLocked(page)) {
+ if (!force)
+ goto move_newpage;
+ lock_page(page);
+ }
+
+ if (PageWriteback(page)) {
+ if (!force)
+ goto unlock;
+ wait_on_page_writeback(page);
+ }
+
+ /*
+ * Establish migration ptes or remove ptes
+ */
+ if (try_to_unmap(page, 1) != SWAP_FAIL) {
+ if (!page_mapped(page))
+ rc = move_to_new_page(newpage, page);
+ } else
+ /* A vma has VM_LOCKED set -> permanent failure */
+ rc = -EPERM;
+
+ if (rc)
+ remove_migration_ptes(page, page);
+unlock:
+ unlock_page(page);
+
+ if (rc != -EAGAIN) {
+ /*
+ * A page that has been migrated has all references
+ * removed and will be freed. A page that has not been
+ * migrated will have kept its references and be
+ * restored.
+ */
+ list_del(&page->lru);
+ move_to_lru(page);
+ }
+
+move_newpage:
+ /*
+ * Move the new page to the LRU. If migration was not successful
+ * then this will free the page.
+ */
+ move_to_lru(newpage);
+ if (result) {
+ if (rc)
+ *result = rc;
+ else
+ *result = page_to_nid(newpage);
+ }
+ return rc;
}
-EXPORT_SYMBOL(migrate_page);
/*
* migrate_pages
*
- * Two lists are passed to this function. The first list
- * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
- * can be moved to. If the second list is NULL then all
- * pages are swapped out.
+ * The function takes one list of pages to migrate and a function
+ * that determines from the page to be migrated and the private data
+ * the target of the move and allocates the page.
*
* The function returns after 10 attempts or if no pages
* are movable anymore because to has become empty
- * or no retryable pages exist anymore.
+ * or no retryable pages exist anymore. All pages will be
+ * returned to the LRU or freed.
*
- * Return: Number of pages not migrated when "to" ran empty.
+ * Return: Number of pages not migrated or error code.
*/
-int migrate_pages(struct list_head *from, struct list_head *to,
- struct list_head *moved, struct list_head *failed)
+int migrate_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private)
{
- int retry;
+ int retry = 1;
int nr_failed = 0;
int pass = 0;
struct page *page;
@@ -358,305 +682,297 @@ int migrate_pages(struct list_head *from, struct list_head *to,
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
-redo:
- retry = 0;
+ for(pass = 0; pass < 10 && retry; pass++) {
+ retry = 0;
+
+ list_for_each_entry_safe(page, page2, from, lru) {
+ cond_resched();
+
+ rc = unmap_and_move(get_new_page, private,
+ page, pass > 2);
+
+ switch(rc) {
+ case -ENOMEM:
+ goto out;
+ case -EAGAIN:
+ retry++;
+ break;
+ case 0:
+ break;
+ default:
+ /* Permanent failure */
+ nr_failed++;
+ break;
+ }
+ }
+ }
+ rc = 0;
+out:
+ if (!swapwrite)
+ current->flags &= ~PF_SWAPWRITE;
- list_for_each_entry_safe(page, page2, from, lru) {
- struct page *newpage = NULL;
- struct address_space *mapping;
+ putback_lru_pages(from);
- cond_resched();
+ if (rc)
+ return rc;
- rc = 0;
- if (page_count(page) == 1)
- /* page was freed from under us. So we are done. */
- goto next;
+ return nr_failed + retry;
+}
- if (to && list_empty(to))
- break;
+#ifdef CONFIG_NUMA
+/*
+ * Move a list of individual pages
+ */
+struct page_to_node {
+ unsigned long addr;
+ struct page *page;
+ int node;
+ int status;
+};
- /*
- * Skip locked pages during the first two passes to give the
- * functions holding the lock time to release the page. Later we
- * use lock_page() to have a higher chance of acquiring the
- * lock.
- */
- rc = -EAGAIN;
- if (pass > 2)
- lock_page(page);
- else
- if (TestSetPageLocked(page))
- goto next;
+static struct page *new_page_node(struct page *p, unsigned long private,
+ int **result)
+{
+ struct page_to_node *pm = (struct page_to_node *)private;
- /*
- * Only wait on writeback if we have already done a pass where
- * we we may have triggered writeouts for lots of pages.
- */
- if (pass > 0) {
- wait_on_page_writeback(page);
- } else {
- if (PageWriteback(page))
- goto unlock_page;
- }
+ while (pm->node != MAX_NUMNODES && pm->page != p)
+ pm++;
- /*
- * Anonymous pages must have swap cache references otherwise
- * the information contained in the page maps cannot be
- * preserved.
- */
- if (PageAnon(page) && !PageSwapCache(page)) {
- if (!add_to_swap(page, GFP_KERNEL)) {
- rc = -ENOMEM;
- goto unlock_page;
- }
- }
+ if (pm->node == MAX_NUMNODES)
+ return NULL;
- if (!to) {
- rc = swap_page(page);
- goto next;
- }
+ *result = &pm->status;
- newpage = lru_to_page(to);
- lock_page(newpage);
+ return alloc_pages_node(pm->node, GFP_HIGHUSER, 0);
+}
- /*
- * Pages are properly locked and writeback is complete.
- * Try to migrate the page.
- */
- mapping = page_mapping(page);
- if (!mapping)
- goto unlock_both;
+/*
+ * Move a set of pages as indicated in the pm array. The addr
+ * field must be set to the virtual address of the page to be moved
+ * and the node number must contain a valid target node.
+ */
+static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
+ int migrate_all)
+{
+ int err;
+ struct page_to_node *pp;
+ LIST_HEAD(pagelist);
- if (mapping->a_ops->migratepage) {
- /*
- * Most pages have a mapping and most filesystems
- * should provide a migration function. Anonymous
- * pages are part of swap space which also has its
- * own migration function. This is the most common
- * path for page migration.
- */
- rc = mapping->a_ops->migratepage(newpage, page);
- goto unlock_both;
- }
-
- /* Make sure the dirty bit is up to date */
- if (try_to_unmap(page, 1) == SWAP_FAIL) {
- rc = -EPERM;
- goto unlock_both;
- }
+ down_read(&mm->mmap_sem);
- if (page_mapcount(page)) {
- rc = -EAGAIN;
- goto unlock_both;
- }
+ /*
+ * Build a list of pages to migrate
+ */
+ migrate_prep();
+ for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
+ struct vm_area_struct *vma;
+ struct page *page;
/*
- * Default handling if a filesystem does not provide
- * a migration function. We can only migrate clean
- * pages so try to write out any dirty pages first.
+ * A valid page pointer that will not match any of the
+ * pages that will be moved.
*/
- if (PageDirty(page)) {
- switch (pageout(page, mapping)) {
- case PAGE_KEEP:
- case PAGE_ACTIVATE:
- goto unlock_both;
-
- case PAGE_SUCCESS:
- unlock_page(newpage);
- goto next;
-
- case PAGE_CLEAN:
- ; /* try to migrate the page below */
- }
- }
+ pp->page = ZERO_PAGE(0);
- /*
- * Buffers are managed in a filesystem specific way.
- * We must have no buffers or drop them.
- */
- if (!page_has_buffers(page) ||
- try_to_release_page(page, GFP_KERNEL)) {
- rc = migrate_page(newpage, page);
- goto unlock_both;
- }
+ err = -EFAULT;
+ vma = find_vma(mm, pp->addr);
+ if (!vma)
+ goto set_status;
- /*
- * On early passes with mapped pages simply
- * retry. There may be a lock held for some
- * buffers that may go away. Later
- * swap them out.
- */
- if (pass > 4) {
+ page = follow_page(vma, pp->addr, FOLL_GET);
+ err = -ENOENT;
+ if (!page)
+ goto set_status;
+
+ if (PageReserved(page)) /* Check for zero page */
+ goto put_and_set;
+
+ pp->page = page;
+ err = page_to_nid(page);
+
+ if (err == pp->node)
/*
- * Persistently unable to drop buffers..... As a
- * measure of last resort we fall back to
- * swap_page().
+ * Node already in the right place
*/
- unlock_page(newpage);
- newpage = NULL;
- rc = swap_page(page);
- goto next;
- }
+ goto put_and_set;
-unlock_both:
- unlock_page(newpage);
-
-unlock_page:
- unlock_page(page);
-
-next:
- if (rc == -EAGAIN) {
- retry++;
- } else if (rc) {
- /* Permanent failure */
- list_move(&page->lru, failed);
- nr_failed++;
- } else {
- if (newpage) {
- /* Successful migration. Return page to LRU */
- move_to_lru(newpage);
- }
- list_move(&page->lru, moved);
- }
+ err = -EACCES;
+ if (page_mapcount(page) > 1 &&
+ !migrate_all)
+ goto put_and_set;
+
+ err = isolate_lru_page(page, &pagelist);
+put_and_set:
+ /*
+ * Either remove the duplicate refcount from
+ * isolate_lru_page() or drop the page ref if it was
+ * not isolated.
+ */
+ put_page(page);
+set_status:
+ pp->status = err;
}
- if (retry && pass++ < 10)
- goto redo;
- if (!swapwrite)
- current->flags &= ~PF_SWAPWRITE;
+ if (!list_empty(&pagelist))
+ err = migrate_pages(&pagelist, new_page_node,
+ (unsigned long)pm);
+ else
+ err = -ENOENT;
- return nr_failed + retry;
+ up_read(&mm->mmap_sem);
+ return err;
}
/*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
+ * Determine the nodes of a list of pages. The addr in the pm array
+ * must have been set to the virtual address of which we want to determine
+ * the node number.
*/
-int buffer_migrate_page(struct page *newpage, struct page *page)
+static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
{
- struct address_space *mapping = page->mapping;
- struct buffer_head *bh, *head;
- int rc;
+ down_read(&mm->mmap_sem);
+
+ for ( ; pm->node != MAX_NUMNODES; pm++) {
+ struct vm_area_struct *vma;
+ struct page *page;
+ int err;
+
+ err = -EFAULT;
+ vma = find_vma(mm, pm->addr);
+ if (!vma)
+ goto set_status;
+
+ page = follow_page(vma, pm->addr, 0);
+ err = -ENOENT;
+ /* Use PageReserved to check for zero page */
+ if (!page || PageReserved(page))
+ goto set_status;
+
+ err = page_to_nid(page);
+set_status:
+ pm->status = err;
+ }
- if (!mapping)
- return -EAGAIN;
+ up_read(&mm->mmap_sem);
+ return 0;
+}
- if (!page_has_buffers(page))
- return migrate_page(newpage, page);
+/*
+ * Move a list of pages in the address space of the currently executing
+ * process.
+ */
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+ const void __user * __user *pages,
+ const int __user *nodes,
+ int __user *status, int flags)
+{
+ int err = 0;
+ int i;
+ struct task_struct *task;
+ nodemask_t task_nodes;
+ struct mm_struct *mm;
+ struct page_to_node *pm = NULL;
- head = page_buffers(page);
+ /* Check flags */
+ if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
+ return -EINVAL;
- rc = migrate_page_remove_references(newpage, page, 3);
+ if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
+ return -EPERM;
- if (rc)
- return rc;
+ /* Find the mm_struct */
+ read_lock(&tasklist_lock);
+ task = pid ? find_task_by_pid(pid) : current;
+ if (!task) {
+ read_unlock(&tasklist_lock);
+ return -ESRCH;
+ }
+ mm = get_task_mm(task);
+ read_unlock(&tasklist_lock);
- bh = head;
- do {
- get_bh(bh);
- lock_buffer(bh);
- bh = bh->b_this_page;
+ if (!mm)
+ return -EINVAL;
- } while (bh != head);
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+ * capabilities, superuser privileges or the same
+ * userid as the target process.
+ */
+ if ((current->euid != task->suid) && (current->euid != task->uid) &&
+ (current->uid != task->suid) && (current->uid != task->uid) &&
+ !capable(CAP_SYS_NICE)) {
+ err = -EPERM;
+ goto out2;
+ }
- ClearPagePrivate(page);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- get_page(newpage);
+ err = security_task_movememory(task);
+ if (err)
+ goto out2;
- bh = head;
- do {
- set_bh_page(bh, newpage, bh_offset(bh));
- bh = bh->b_this_page;
- } while (bh != head);
+ task_nodes = cpuset_mems_allowed(task);
- SetPagePrivate(newpage);
+ /* Limit nr_pages so that the multiplication may not overflow */
+ if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
+ err = -E2BIG;
+ goto out2;
+ }
- migrate_page_copy(newpage, page);
+ pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
+ if (!pm) {
+ err = -ENOMEM;
+ goto out2;
+ }
- bh = head;
- do {
- unlock_buffer(bh);
- put_bh(bh);
- bh = bh->b_this_page;
+ /*
+ * Get parameters from user space and initialize the pm
+ * array. Return various errors if the user did something wrong.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ const void *p;
- } while (bh != head);
+ err = -EFAULT;
+ if (get_user(p, pages + i))
+ goto out;
- return 0;
-}
-EXPORT_SYMBOL(buffer_migrate_page);
+ pm[i].addr = (unsigned long)p;
+ if (nodes) {
+ int node;
-/*
- * Migrate the list 'pagelist' of pages to a certain destination.
- *
- * Specify destination with either non-NULL vma or dest_node >= 0
- * Return the number of pages not migrated or error code
- */
-int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest)
-{
- LIST_HEAD(newlist);
- LIST_HEAD(moved);
- LIST_HEAD(failed);
- int err = 0;
- unsigned long offset = 0;
- int nr_pages;
- struct page *page;
- struct list_head *p;
+ if (get_user(node, nodes + i))
+ goto out;
-redo:
- nr_pages = 0;
- list_for_each(p, pagelist) {
- if (vma) {
- /*
- * The address passed to alloc_page_vma is used to
- * generate the proper interleave behavior. We fake
- * the address here by an increasing offset in order
- * to get the proper distribution of pages.
- *
- * No decision has been made as to which page
- * a certain old page is moved to so we cannot
- * specify the correct address.
- */
- page = alloc_page_vma(GFP_HIGHUSER, vma,
- offset + vma->vm_start);
- offset += PAGE_SIZE;
- }
- else
- page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
+ err = -ENODEV;
+ if (!node_online(node))
+ goto out;
- if (!page) {
- err = -ENOMEM;
- goto out;
+ err = -EACCES;
+ if (!node_isset(node, task_nodes))
+ goto out;
+
+ pm[i].node = node;
}
- list_add_tail(&page->lru, &newlist);
- nr_pages++;
- if (nr_pages > MIGRATE_CHUNK_SIZE)
- break;
}
- err = migrate_pages(pagelist, &newlist, &moved, &failed);
+ /* End marker */
+ pm[nr_pages].node = MAX_NUMNODES;
+
+ if (nodes)
+ err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
+ else
+ err = do_pages_stat(mm, pm);
- putback_lru_pages(&moved); /* Call release pages instead ?? */
+ if (err >= 0)
+ /* Return status information */
+ for (i = 0; i < nr_pages; i++)
+ if (put_user(pm[i].status, status + i))
+ err = -EFAULT;
- if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
- goto redo;
out:
- /* Return leftover allocated pages */
- while (!list_empty(&newlist)) {
- page = list_entry(newlist.next, struct page, lru);
- list_del(&page->lru);
- __free_page(page);
- }
- list_splice(&failed, pagelist);
- if (err < 0)
- return err;
-
- /* Calculate number of leftover pages */
- nr_pages = 0;
- list_for_each(p, pagelist)
- nr_pages++;
- return nr_pages;
+ vfree(pm);
+out2:
+ mmput(mm);
+ return err;
}
+#endif
+
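
sys_move_pages() above is reached from user space as move_pages(2). A hedged sketch using the libnuma wrapper (link with -lnuma; error handling trimmed):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        long pgsz = sysconf(_SC_PAGESIZE);
        void *buf = NULL;

        if (posix_memalign(&buf, pgsz, pgsz))
                return 1;
        memset(buf, 0, pgsz);                   /* fault the page in first */

        void *pages[1]  = { buf };
        int   nodes[1]  = { 0 };                /* request node 0 */
        int   status[1] = { -1 };

        /* pid 0 means the calling process; a NULL nodes array would only
         * query placement, taking the do_pages_stat() path above. */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
                printf("page is now on node %d\n", status[0]);
        else
                perror("move_pages");
        return 0;
}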
diff --git a/mm/mmap.c b/mm/mmap.c
index e6ee12344b1..6446c6134b0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1065,7 +1065,8 @@ munmap_back:
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
- vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_page_prot = protection_map[vm_flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
vma->vm_pgoff = pgoff;
if (file) {
@@ -1089,6 +1090,12 @@ munmap_back:
goto free_vma;
}
+ /* Don't make the VMA automatically writable if it's shared, but the
+ * backer wishes to know when pages are first written to */
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ vma->vm_page_prot =
+ protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
* shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
* that memory reservation must be checked; but that reservation
@@ -1921,7 +1928,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
- vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma->vm_page_prot = protection_map[flags &
+ (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
mm->total_vm += len >> PAGE_SHIFT;
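
The masking above works because protection_map[] is indexed by the four low protection bits of vm_flags. A brief illustration, not taken from the patch:

/*
 * VM_READ = 0x1, VM_WRITE = 0x2, VM_EXEC = 0x4, VM_SHARED = 0x8, so the
 * table has 16 entries: indexes 0-7 hold the "private" (copy-on-write)
 * protections, 8-15 the shared ones.  Dropping VM_SHARED from the index
 * for a page_mkwrite mapping, e.g.
 *
 *   vma->vm_page_prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
 *
 * gives the mapping the read-only hardware protection of a private
 * mapping, so the first store faults into do_wp_page(), which calls
 * ->page_mkwrite() before the pte is made writable.
 */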
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4c14d4289b6..638edabaff7 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -19,7 +19,8 @@
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
-
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
@@ -28,12 +29,13 @@
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot)
{
- pte_t *pte;
+ pte_t *pte, oldpte;
spinlock_t *ptl;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
do {
- if (pte_present(*pte)) {
+ oldpte = *pte;
+ if (pte_present(oldpte)) {
pte_t ptent;
/* Avoid an SMP race with hardware updated dirty/clean
@@ -43,7 +45,22 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
set_pte_at(mm, addr, pte, ptent);
lazy_mmu_prot_update(ptent);
+#ifdef CONFIG_MIGRATION
+ } else if (!pte_file(oldpte)) {
+ swp_entry_t entry = pte_to_swp_entry(oldpte);
+
+ if (is_write_migration_entry(entry)) {
+ /*
+ * A protection check is difficult so
+ * just be safe and disable write
+ */
+ make_migration_entry_read(&entry);
+ set_pte_at(mm, addr, pte,
+ swp_entry_to_pte(entry));
+ }
+#endif
}
+
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(pte - 1, ptl);
}
@@ -106,6 +123,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long oldflags = vma->vm_flags;
long nrpages = (end - start) >> PAGE_SHIFT;
unsigned long charged = 0;
+ unsigned int mask;
pgprot_t newprot;
pgoff_t pgoff;
int error;
@@ -132,8 +150,6 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
}
}
- newprot = protection_map[newflags & 0xf];
-
/*
* First try to merge with previous and/or next vma.
*/
@@ -160,6 +176,14 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
}
success:
+ /* Don't make the VMA automatically writable if it's shared, but the
+ * backer wishes to know when pages are first written to */
+ mask = VM_READ|VM_WRITE|VM_EXEC|VM_SHARED;
+ if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+ mask &= ~VM_SHARED;
+
+ newprot = protection_map[newflags & mask];
+
/*
* vm_flags and vm_page_prot are protected by the mmap_sem
* held in write mode.
@@ -205,8 +229,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
/*
* Does the application expect PROT_READ to imply PROT_EXEC:
*/
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
prot |= PROT_EXEC;
vm_flags = calc_vm_prot_bits(prot);
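Aside (illustrative, not part of the patch): the protection_map indexing changed in the mm/mmap.c and mm/mprotect.c hunks above can be sketched in plain userspace C. The flag values and table names below are assumptions that loosely mirror i386's table; the point is that masking out VM_SHARED when ->page_mkwrite is set drops a shared writable mapping into the private (copy-on-write) half, so the pte starts out write-protected and the first write faults.

/* Minimal sketch of protection_map indexing; flag values are assumed. */
#include <stdio.h>

#define VM_READ   0x1u
#define VM_WRITE  0x2u
#define VM_EXEC   0x4u
#define VM_SHARED 0x8u

static const char *protection_map[16] = {
	/* private half: writes are emulated by copy-on-write */
	"PAGE_NONE", "PAGE_READONLY", "PAGE_COPY", "PAGE_COPY",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC",
	"PAGE_COPY_EXEC", "PAGE_COPY_EXEC",
	/* shared half: writes go straight to the page */
	"PAGE_NONE", "PAGE_READONLY", "PAGE_SHARED", "PAGE_SHARED",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC",
	"PAGE_SHARED_EXEC", "PAGE_SHARED_EXEC",
};

int main(void)
{
	unsigned int flags = VM_READ | VM_WRITE | VM_SHARED;

	/* default: index with all four bits -> directly writable pte */
	printf("full mask: %s\n",
	       protection_map[flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

	/* with ->page_mkwrite: drop VM_SHARED so the first write faults */
	printf("mkwrite  : %s\n",
	       protection_map[flags & (VM_READ|VM_WRITE|VM_EXEC)]);
	return 0;
}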
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 042e6436c3e..d46ed0f1dc0 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -22,10 +22,11 @@
#include <linux/jiffies.h>
#include <linux/cpuset.h>
+int sysctl_panic_on_oom;
/* #define DEBUG */
/**
- * oom_badness - calculate a numeric value for how bad this task has been
+ * badness - calculate a numeric value for how bad this task has been
* @p: task struct of which task we should calculate
* @uptime: current uptime in seconds
*
@@ -200,7 +201,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
continue;
/*
- * This is in the process of releasing memory so for wait it
+ * This is in the process of releasing memory so wait for it
* to finish before killing some other task by mistake.
*/
releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
@@ -306,7 +307,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points,
}
/**
- * oom_kill - kill the "best" process when we run out of memory
+ * out_of_memory - kill the "best" process when we run out of memory
*
* If we run out of memory, we have the choice between either
* killing a random task (bad), letting the system crash (worse)
@@ -344,6 +345,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
break;
case CONSTRAINT_NONE:
+ if (sysctl_panic_on_oom)
+ panic("out of memory. panic_on_oom is selected\n");
retry:
/*
* Rambo mode: Shoot down a process and hope it solves whatever
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 75d7f48b79b..8ccf6f1b147 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,6 +204,7 @@ static void balance_dirty_pages(struct address_space *mapping)
.sync_mode = WB_SYNC_NONE,
.older_than_this = NULL,
.nr_to_write = write_chunk,
+ .range_cyclic = 1,
};
get_dirty_limits(&wbs, &background_thresh,
@@ -331,6 +332,7 @@ static void background_writeout(unsigned long _min_pages)
.older_than_this = NULL,
.nr_to_write = 0,
.nonblocking = 1,
+ .range_cyclic = 1,
};
for ( ; ; ) {
@@ -407,6 +409,7 @@ static void wb_kupdate(unsigned long arg)
.nr_to_write = 0,
.nonblocking = 1,
.for_kupdate = 1,
+ .range_cyclic = 1,
};
sync_supers();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 253a450c400..423db0db7c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -37,6 +37,7 @@
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
+#include <linux/stop_machine.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -83,8 +84,8 @@ EXPORT_SYMBOL(zone_table);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;
-unsigned long __initdata nr_kernel_pages;
-unsigned long __initdata nr_all_pages;
+unsigned long __meminitdata nr_kernel_pages;
+unsigned long __meminitdata nr_all_pages;
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
@@ -286,22 +287,27 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
* we can do coalesce a page and its buddy if
* (a) the buddy is not in a hole &&
* (b) the buddy is in the buddy system &&
- * (c) a page and its buddy have the same order.
+ * (c) a page and its buddy have the same order &&
+ * (d) a page and its buddy are in the same zone.
*
* For recording whether a page is in the buddy system, we use PG_buddy.
* Setting, clearing, and testing PG_buddy is serialized by zone->lock.
*
* For recording page's order, we use page_private(page).
*/
-static inline int page_is_buddy(struct page *page, int order)
+static inline int page_is_buddy(struct page *page, struct page *buddy,
+ int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
- if (!pfn_valid(page_to_pfn(page)))
+ if (!pfn_valid(page_to_pfn(buddy)))
return 0;
#endif
- if (PageBuddy(page) && page_order(page) == order) {
- BUG_ON(page_count(page) != 0);
+ if (page_zone_id(page) != page_zone_id(buddy))
+ return 0;
+
+ if (PageBuddy(buddy) && page_order(buddy) == order) {
+ BUG_ON(page_count(buddy) != 0);
return 1;
}
return 0;
@@ -352,7 +358,7 @@ static inline void __free_one_page(struct page *page,
struct page *buddy;
buddy = __page_find_buddy(page, page_idx, order);
- if (!page_is_buddy(buddy, order))
+ if (!page_is_buddy(page, buddy, order))
break; /* Move the buddy up one level. */
list_del(&buddy->lru);
@@ -1485,7 +1491,7 @@ void show_free_areas(void)
}
for_each_zone(zone) {
- unsigned long nr, flags, order, total = 0;
+ unsigned long nr[MAX_ORDER], flags, order, total = 0;
show_node(zone);
printk("%s: ", zone->name);
@@ -1496,11 +1502,12 @@ void show_free_areas(void)
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
- nr = zone->free_area[order].nr_free;
- total += nr << order;
- printk("%lu*%lukB ", nr, K(1UL) << order);
+ nr[order] = zone->free_area[order].nr_free;
+ total += nr[order] << order;
}
spin_unlock_irqrestore(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++)
+ printk("%lu*%lukB ", nr[order], K(1UL) << order);
printk("= %lukB\n", K(total));
}
@@ -1512,7 +1519,7 @@ void show_free_areas(void)
*
* Add all populated zones of a node to the zonelist.
*/
-static int __init build_zonelists_node(pg_data_t *pgdat,
+static int __meminit build_zonelists_node(pg_data_t *pgdat,
struct zonelist *zonelist, int nr_zones, int zone_type)
{
struct zone *zone;
@@ -1548,7 +1555,7 @@ static inline int highest_zone(int zone_bits)
#ifdef CONFIG_NUMA
#define MAX_NODE_LOAD (num_online_nodes())
-static int __initdata node_load[MAX_NUMNODES];
+static int __meminitdata node_load[MAX_NUMNODES];
/**
* find_next_best_node - find the next node that should appear in a given node's fallback list
* @node: node whose fallback list we're appending
@@ -1563,7 +1570,7 @@ static int __initdata node_load[MAX_NUMNODES];
* on them otherwise.
* It returns -1 if no node is found.
*/
-static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
+static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
{
int n, val;
int min_val = INT_MAX;
@@ -1609,7 +1616,7 @@ static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
return best_node;
}
-static void __init build_zonelists(pg_data_t *pgdat)
+static void __meminit build_zonelists(pg_data_t *pgdat)
{
int i, j, k, node, local_node;
int prev_node, load;
@@ -1661,7 +1668,7 @@ static void __init build_zonelists(pg_data_t *pgdat)
#else /* CONFIG_NUMA */
-static void __init build_zonelists(pg_data_t *pgdat)
+static void __meminit build_zonelists(pg_data_t *pgdat)
{
int i, j, k, node, local_node;
@@ -1699,14 +1706,29 @@ static void __init build_zonelists(pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
-void __init build_all_zonelists(void)
+/* The return type is int only because stop_machine_run() requires it */
+static int __meminit __build_all_zonelists(void *dummy)
{
- int i;
+ int nid;
+ for_each_online_node(nid)
+ build_zonelists(NODE_DATA(nid));
+ return 0;
+}
- for_each_online_node(i)
- build_zonelists(NODE_DATA(i));
- printk("Built %i zonelists\n", num_online_nodes());
- cpuset_init_current_mems_allowed();
+void __meminit build_all_zonelists(void)
+{
+ if (system_state == SYSTEM_BOOTING) {
+ __build_all_zonelists(0);
+ cpuset_init_current_mems_allowed();
+ } else {
+ /* we have to stop all cpus to guarantee there is no user
+ of zonelist */
+ stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+ /* cpuset refresh routine should be here */
+ }
+ vm_total_pages = nr_free_pagecache_pages();
+ printk("Built %i zonelists. Total pages: %ld\n",
+ num_online_nodes(), vm_total_pages);
}
/*
@@ -1722,7 +1744,8 @@ void __init build_all_zonelists(void)
*/
#define PAGES_PER_WAITQUEUE 256
-static inline unsigned long wait_table_size(unsigned long pages)
+#ifndef CONFIG_MEMORY_HOTPLUG
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
unsigned long size = 1;
@@ -1740,6 +1763,29 @@ static inline unsigned long wait_table_size(unsigned long pages)
return max(size, 4UL);
}
+#else
+/*
+ * A zone's size might be changed by hot-add, so it is not possible to determine
+ * a suitable size for its wait_table. So we use the maximum size now.
+ *
+ * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
+ *
+ * i386 (preemption config) : 4096 x 16 = 64Kbyte.
+ * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
+ * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
+ *
+ * The maximum number of entries is reached when a zone's memory is
+ * (512K + 256) pages or more, sized the traditional way (see above). It equals:
+ *
+ * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
+ * ia64(16K page size) : = ( 8G + 4M)byte.
+ * powerpc (64K page size) : = (32G +16M)byte.
+ */
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
+{
+ return 4096UL;
+}
+#endif
/*
* This is an integer logarithm so that shifts can be used later
@@ -2005,23 +2051,46 @@ void __init setup_per_cpu_pageset(void)
#endif
static __meminit
-void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
struct pglist_data *pgdat = zone->zone_pgdat;
+ size_t alloc_size;
/*
* The per-page waitqueue mechanism uses hashed waitqueues
* per zone.
*/
- zone->wait_table_size = wait_table_size(zone_size_pages);
- zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
- zone->wait_table = (wait_queue_head_t *)
- alloc_bootmem_node(pgdat, zone->wait_table_size
- * sizeof(wait_queue_head_t));
+ zone->wait_table_hash_nr_entries =
+ wait_table_hash_nr_entries(zone_size_pages);
+ zone->wait_table_bits =
+ wait_table_bits(zone->wait_table_hash_nr_entries);
+ alloc_size = zone->wait_table_hash_nr_entries
+ * sizeof(wait_queue_head_t);
+
+ if (system_state == SYSTEM_BOOTING) {
+ zone->wait_table = (wait_queue_head_t *)
+ alloc_bootmem_node(pgdat, alloc_size);
+ } else {
+ /*
+ * This case means that a zone whose size was 0 gets new memory
+ * via memory hot-add.
+ * But it may be the case that a new node was hot-added. In
+ * this case vmalloc() will not be able to use this new node's
+ * memory - this wait_table must be initialized to use this new
+ * node itself as well.
+ * To use this new node's memory, further consideration will be
+ * necessary.
+ */
+ zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size);
+ }
+ if (!zone->wait_table)
+ return -ENOMEM;
- for(i = 0; i < zone->wait_table_size; ++i)
+ for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
init_waitqueue_head(zone->wait_table + i);
+
+ return 0;
}
static __meminit void zone_pcp_init(struct zone *zone)
@@ -2043,12 +2112,15 @@ static __meminit void zone_pcp_init(struct zone *zone)
zone->name, zone->present_pages, batch);
}
-static __meminit void init_currently_empty_zone(struct zone *zone,
- unsigned long zone_start_pfn, unsigned long size)
+__meminit int init_currently_empty_zone(struct zone *zone,
+ unsigned long zone_start_pfn,
+ unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
-
- zone_wait_table_init(zone, size);
+ int ret;
+ ret = zone_wait_table_init(zone, size);
+ if (ret)
+ return ret;
pgdat->nr_zones = zone_idx(zone) + 1;
zone->zone_start_pfn = zone_start_pfn;
@@ -2056,6 +2128,8 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+
+ return 0;
}
/*
@@ -2064,12 +2138,13 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
* - mark all memory queues empty
* - clear the memory bitmaps
*/
-static void __init free_area_init_core(struct pglist_data *pgdat,
+static void __meminit free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
{
unsigned long j;
int nid = pgdat->node_id;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
+ int ret;
pgdat_resize_init(pgdat);
pgdat->nr_zones = 0;
@@ -2111,7 +2186,8 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
continue;
zonetable_add(zone, nid, j, zone_start_pfn, size);
- init_currently_empty_zone(zone, zone_start_pfn, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+ BUG_ON(ret);
zone_start_pfn += size;
}
}
@@ -2152,7 +2228,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long node_start_pfn,
unsigned long *zholes_size)
{
@@ -2804,42 +2880,14 @@ void *__init alloc_large_system_hash(const char *tablename,
}
#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-/*
- * pfn <-> page translation. out-of-line version.
- * (see asm-generic/memory_model.h)
- */
-#if defined(CONFIG_FLATMEM)
struct page *pfn_to_page(unsigned long pfn)
{
- return mem_map + (pfn - ARCH_PFN_OFFSET);
+ return __pfn_to_page(pfn);
}
unsigned long page_to_pfn(struct page *page)
{
- return (page - mem_map) + ARCH_PFN_OFFSET;
-}
-#elif defined(CONFIG_DISCONTIGMEM)
-struct page *pfn_to_page(unsigned long pfn)
-{
- int nid = arch_pfn_to_nid(pfn);
- return NODE_DATA(nid)->node_mem_map + arch_local_page_offset(pfn,nid);
-}
-unsigned long page_to_pfn(struct page *page)
-{
- struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
- return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
-}
-#elif defined(CONFIG_SPARSEMEM)
-struct page *pfn_to_page(unsigned long pfn)
-{
- return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
-}
-
-unsigned long page_to_pfn(struct page *page)
-{
- long section_id = page_to_section(page);
- return page - __section_mem_map_addr(__nr_to_section(section_id));
+ return __page_to_pfn(page);
}
-#endif /* CONFIG_FLATMEM/DISCONTIGMME/SPARSEMEM */
EXPORT_SYMBOL(pfn_to_page);
EXPORT_SYMBOL(page_to_pfn);
#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
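Aside (illustrative, not part of the patch): the wait-table comment in the mm/page_alloc.c hunks above relies on the traditional sizing rule — one waitqueue per 256 pages, rounded up to a power of two, clamped to [4, 4096] entries. A small userspace sketch of that arithmetic, with the (512K + 256)-page threshold from the comment:

/* Sketch of the traditional wait-table sizing; userspace only. */
#include <stdio.h>

#define PAGES_PER_WAITQUEUE 256UL

static unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;
	while (size < pages)
		size <<= 1;

	if (size > 4096UL)	/* cap used by the non-hotplug version */
		size = 4096UL;
	return size < 4UL ? 4UL : size;
}

int main(void)
{
	/* (512K + 256) pages is the smallest zone that already needs 4096 entries */
	unsigned long sizes[] = { 1024UL, 1UL << 18, (512UL << 10) + 256, 1UL << 22 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%8lu pages -> %4lu waitqueues\n",
		       sizes[i], wait_table_hash_nr_entries(sizes[i]));
	return 0;
}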
diff --git a/mm/pdflush.c b/mm/pdflush.c
index c4b6d0afd73..df7e50b8f70 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -202,8 +202,7 @@ int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
unsigned long flags;
int ret = 0;
- if (fn == NULL)
- BUG(); /* Hard to diagnose if it's deferred */
+ BUG_ON(fn == NULL); /* Hard to diagnose if it's deferred */
spin_lock_irqsave(&pdflush_lock, flags);
if (list_empty(&pdflush_list)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 1963e269314..882a85826bb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -103,7 +103,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
allocated = NULL;
}
spin_unlock(&mm->page_table_lock);
@@ -127,7 +127,7 @@ void __anon_vma_link(struct vm_area_struct *vma)
struct anon_vma *anon_vma = vma->anon_vma;
if (anon_vma) {
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
}
}
@@ -138,7 +138,7 @@ void anon_vma_link(struct vm_area_struct *vma)
if (anon_vma) {
spin_lock(&anon_vma->lock);
- list_add(&vma->anon_vma_node, &anon_vma->head);
+ list_add_tail(&vma->anon_vma_node, &anon_vma->head);
validate_anon_vma(vma);
spin_unlock(&anon_vma->lock);
}
@@ -205,44 +205,6 @@ out:
return anon_vma;
}
-#ifdef CONFIG_MIGRATION
-/*
- * Remove an anonymous page from swap replacing the swap pte's
- * through real pte's pointing to valid pages and then releasing
- * the page from the swap cache.
- *
- * Must hold page lock on page and mmap_sem of one vma that contains
- * the page.
- */
-void remove_from_swap(struct page *page)
-{
- struct anon_vma *anon_vma;
- struct vm_area_struct *vma;
- unsigned long mapping;
-
- if (!PageSwapCache(page))
- return;
-
- mapping = (unsigned long)page->mapping;
-
- if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
- return;
-
- /*
- * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
- */
- anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
- spin_lock(&anon_vma->lock);
-
- list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
- remove_vma_swap(vma, page);
-
- spin_unlock(&anon_vma->lock);
- delete_from_swap_cache(page);
-}
-EXPORT_SYMBOL(remove_from_swap);
-#endif
-
/*
* At what user virtual address is page expected in vma?
*/
@@ -578,7 +540,7 @@ void page_remove_rmap(struct page *page)
* repeatedly from either try_to_unmap_anon or try_to_unmap_file.
*/
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
- int ignore_refs)
+ int migration)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -602,7 +564,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
*/
if ((vma->vm_flags & VM_LOCKED) ||
(ptep_clear_flush_young(vma, address, pte)
- && !ignore_refs)) {
+ && !migration)) {
ret = SWAP_FAIL;
goto out_unmap;
}
@@ -620,24 +582,45 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
- /*
- * Store the swap location in the pte.
- * See handle_pte_fault() ...
- */
- BUG_ON(!PageSwapCache(page));
- swap_duplicate(entry);
- if (list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- if (list_empty(&mm->mmlist))
- list_add(&mm->mmlist, &init_mm.mmlist);
- spin_unlock(&mmlist_lock);
+
+ if (PageSwapCache(page)) {
+ /*
+ * Store the swap location in the pte.
+ * See handle_pte_fault() ...
+ */
+ swap_duplicate(entry);
+ if (list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&mm->mmlist))
+ list_add(&mm->mmlist, &init_mm.mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ dec_mm_counter(mm, anon_rss);
+#ifdef CONFIG_MIGRATION
+ } else {
+ /*
+ * Store the pfn of the page in a special migration
+ * pte. do_swap_page() will wait until the migration
+ * pte is removed and then restart fault handling.
+ */
+ BUG_ON(!migration);
+ entry = make_migration_entry(page, pte_write(pteval));
+#endif
}
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
BUG_ON(pte_file(*pte));
- dec_mm_counter(mm, anon_rss);
} else
+#ifdef CONFIG_MIGRATION
+ if (migration) {
+ /* Establish migration entry for a file page */
+ swp_entry_t entry;
+ entry = make_migration_entry(page, pte_write(pteval));
+ set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ } else
+#endif
dec_mm_counter(mm, file_rss);
+
page_remove_rmap(page);
page_cache_release(page);
@@ -736,7 +719,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
pte_unmap_unlock(pte - 1, ptl);
}
-static int try_to_unmap_anon(struct page *page, int ignore_refs)
+static int try_to_unmap_anon(struct page *page, int migration)
{
struct anon_vma *anon_vma;
struct vm_area_struct *vma;
@@ -747,7 +730,7 @@ static int try_to_unmap_anon(struct page *page, int ignore_refs)
return ret;
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
- ret = try_to_unmap_one(page, vma, ignore_refs);
+ ret = try_to_unmap_one(page, vma, migration);
if (ret == SWAP_FAIL || !page_mapped(page))
break;
}
@@ -764,7 +747,7 @@ static int try_to_unmap_anon(struct page *page, int ignore_refs)
*
* This function is only called from try_to_unmap for object-based pages.
*/
-static int try_to_unmap_file(struct page *page, int ignore_refs)
+static int try_to_unmap_file(struct page *page, int migration)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -778,7 +761,7 @@ static int try_to_unmap_file(struct page *page, int ignore_refs)
spin_lock(&mapping->i_mmap_lock);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
- ret = try_to_unmap_one(page, vma, ignore_refs);
+ ret = try_to_unmap_one(page, vma, migration);
if (ret == SWAP_FAIL || !page_mapped(page))
goto out;
}
@@ -863,16 +846,16 @@ out:
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
*/
-int try_to_unmap(struct page *page, int ignore_refs)
+int try_to_unmap(struct page *page, int migration)
{
int ret;
BUG_ON(!PageLocked(page));
if (PageAnon(page))
- ret = try_to_unmap_anon(page, ignore_refs);
+ ret = try_to_unmap_anon(page, migration);
else
- ret = try_to_unmap_file(page, ignore_refs);
+ ret = try_to_unmap_file(page, migration);
if (!page_mapped(page))
ret = SWAP_SUCCESS;
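Aside (illustrative, not part of the patch): the mm/rmap.c hunks above replace a mapped pte with a "migration entry" that records the page's pfn plus whether the mapping was writable, and mm/mprotect.c downgrades such an entry to read-only when it cannot check protections. The real kernel encodes this through swp_entry_t and reserved swap types; the field layout below is purely an assumption to show the idea.

/* Conceptual sketch of a migration entry; encoding is assumed. */
#include <stdio.h>

typedef unsigned long swp_entry_t;

#define MIG_WRITE_BIT 0x1UL

static swp_entry_t make_migration_entry(unsigned long pfn, int writable)
{
	return (pfn << 1) | (writable ? MIG_WRITE_BIT : 0);
}

static int is_write_migration_entry(swp_entry_t e)
{
	return e & MIG_WRITE_BIT;
}

static void make_migration_entry_read(swp_entry_t *e)
{
	*e &= ~MIG_WRITE_BIT;	/* what mprotect does when it cannot check */
}

static unsigned long migration_entry_to_pfn(swp_entry_t e)
{
	return e >> 1;
}

int main(void)
{
	swp_entry_t e = make_migration_entry(0x1234, 1);

	printf("pfn=%#lx write=%d\n", migration_entry_to_pfn(e),
	       is_write_migration_entry(e));
	make_migration_entry_read(&e);
	printf("pfn=%#lx write=%d\n", migration_entry_to_pfn(e),
	       is_write_migration_entry(e));
	return 0;
}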
diff --git a/mm/shmem.c b/mm/shmem.c
index 1e43c8a865b..84b5cf9b63c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1081,14 +1081,6 @@ repeat:
page_cache_release(swappage);
goto repeat;
}
- if (!PageSwapCache(swappage)) {
- /* Page migration has occured */
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- unlock_page(swappage);
- page_cache_release(swappage);
- goto repeat;
- }
if (PageWriteback(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
@@ -1654,9 +1646,9 @@ static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
return desc.error;
}
-static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
+static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
@@ -2233,10 +2225,10 @@ static struct vm_operations_struct shmem_vm_ops = {
};
-static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int shmem_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
+ return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}
static struct file_system_type tmpfs_fs_type = {
diff --git a/mm/slab.c b/mm/slab.c
index f1b644eb39d..98ac20bc0de 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -331,6 +331,8 @@ static __always_inline int index_of(const size_t size)
return 0;
}
+static int slab_early_init = 1;
+
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
@@ -592,6 +594,7 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
{
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
+ BUG_ON(!PageSlab(page));
return (struct kmem_cache *)page->lru.next;
}
@@ -604,6 +607,7 @@ static inline struct slab *page_get_slab(struct page *page)
{
if (unlikely(PageCompound(page)))
page = (struct page *)page_private(page);
+ BUG_ON(!PageSlab(page));
return (struct slab *)page->lru.prev;
}
@@ -1024,6 +1028,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
}
}
}
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+ struct slab *slabp = virt_to_slab(objp);
+ int nodeid = slabp->nodeid;
+ struct kmem_list3 *l3;
+ struct array_cache *alien = NULL;
+
+ /*
+ * Make sure we are not freeing an object from another node to the array
+ * cache on this cpu.
+ */
+ if (likely(slabp->nodeid == numa_node_id()))
+ return 0;
+
+ l3 = cachep->nodelists[numa_node_id()];
+ STATS_INC_NODEFREES(cachep);
+ if (l3->alien && l3->alien[nodeid]) {
+ alien = l3->alien[nodeid];
+ spin_lock(&alien->lock);
+ if (unlikely(alien->avail == alien->limit)) {
+ STATS_INC_ACOVERFLOW(cachep);
+ __drain_alien_cache(cachep, alien, nodeid);
+ }
+ alien->entry[alien->avail++] = objp;
+ spin_unlock(&alien->lock);
+ } else {
+ spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+ free_block(cachep, &objp, 1, nodeid);
+ spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+ }
+ return 1;
+}
+
#else
#define drain_alien_cache(cachep, alien) do { } while (0)
@@ -1038,6 +1076,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+ return 0;
+}
+
#endif
static int cpuup_callback(struct notifier_block *nfb,
@@ -1335,6 +1378,8 @@ void __init kmem_cache_init(void)
NULL, NULL);
}
+ slab_early_init = 0;
+
while (sizes->cs_size != ULONG_MAX) {
/*
* For performance, all the general caches are L1 aligned.
@@ -1450,31 +1495,29 @@ __initcall(cpucache_init);
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
- void *addr;
+ int nr_pages;
int i;
- flags |= cachep->gfpflags;
#ifndef CONFIG_MMU
- /* nommu uses slab's for process anonymous memory allocations, so
- * requires __GFP_COMP to properly refcount higher order allocations"
+ /*
+ * Nommu uses slabs for process anonymous memory allocations, and thus
+ * requires __GFP_COMP to properly refcount higher order allocations
*/
- page = alloc_pages_node(nodeid, (flags | __GFP_COMP), cachep->gfporder);
-#else
- page = alloc_pages_node(nodeid, flags, cachep->gfporder);
+ flags |= __GFP_COMP;
#endif
+ flags |= cachep->gfpflags;
+
+ page = alloc_pages_node(nodeid, flags, cachep->gfporder);
if (!page)
return NULL;
- addr = page_address(page);
- i = (1 << cachep->gfporder);
+ nr_pages = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- atomic_add(i, &slab_reclaim_pages);
- add_page_state(nr_slab, i);
- while (i--) {
- __SetPageSlab(page);
- page++;
- }
- return addr;
+ atomic_add(nr_pages, &slab_reclaim_pages);
+ add_page_state(nr_slab, nr_pages);
+ for (i = 0; i < nr_pages; i++)
+ __SetPageSlab(page + i);
+ return page_address(page);
}
/*
@@ -1913,8 +1956,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- struct kmem_cache *cachep = NULL;
- struct list_head *p;
+ struct kmem_cache *cachep = NULL, *pc;
/*
* Sanity checks... these are all serious usage bugs.
@@ -1934,8 +1976,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
mutex_lock(&cache_chain_mutex);
- list_for_each(p, &cache_chain) {
- struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
+ list_for_each_entry(pc, &cache_chain, next) {
mm_segment_t old_fs = get_fs();
char tmp;
int res;
@@ -2069,8 +2110,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
#endif
#endif
- /* Determine if the slab management is 'on' or 'off' slab. */
- if (size >= (PAGE_SIZE >> 3))
+ /*
+ * Determine if the slab management is 'on' or 'off' slab.
+ * (bootstrapping cannot cope with offslab caches so don't do
+ * it too early on.)
+ */
+ if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
@@ -2460,23 +2505,28 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
slabp->inuse--;
}
-static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
- void *objp)
+/*
+ * Map pages beginning at addr to the given cache and slab. This is required
+ * for the slab allocator to be able to lookup the cache and slab of a
+ * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ */
+static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
+ void *addr)
{
- int i;
+ int nr_pages;
struct page *page;
- /* Nasty!!!!!! I hope this is OK. */
- page = virt_to_page(objp);
+ page = virt_to_page(addr);
- i = 1;
+ nr_pages = 1;
if (likely(!PageCompound(page)))
- i <<= cachep->gfporder;
+ nr_pages <<= cache->gfporder;
+
do {
- page_set_cache(page, cachep);
- page_set_slab(page, slabp);
+ page_set_cache(page, cache);
+ page_set_slab(page, slab);
page++;
- } while (--i);
+ } while (--nr_pages);
}
/*
@@ -2548,7 +2598,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
goto opps1;
slabp->nodeid = nodeid;
- set_slab_attr(cachep, slabp, objp);
+ slab_map_pages(cachep, slabp, objp);
cache_init_objs(cachep, slabp, ctor_flags);
@@ -2596,6 +2646,28 @@ static void kfree_debugcheck(const void *objp)
}
}
+static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
+{
+ unsigned long redzone1, redzone2;
+
+ redzone1 = *dbg_redzone1(cache, obj);
+ redzone2 = *dbg_redzone2(cache, obj);
+
+ /*
+ * Redzone is ok.
+ */
+ if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
+ return;
+
+ if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
+ slab_error(cache, "double free detected");
+ else
+ slab_error(cache, "memory outside object was overwritten");
+
+ printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+ obj, redzone1, redzone2);
+}
+
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
@@ -2607,27 +2679,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_page(objp);
- if (page_get_cache(page) != cachep) {
- printk(KERN_ERR "mismatch in kmem_cache_free: expected "
- "cache %p, got %p\n",
- page_get_cache(page), cachep);
- printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
- printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
- page_get_cache(page)->name);
- WARN_ON(1);
- }
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
- *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
- slab_error(cachep, "double free, or memory outside"
- " object was overwritten");
- printk(KERN_ERR "%p: redzone 1:0x%lx, "
- "redzone 2:0x%lx.\n",
- objp, *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
- }
+ verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
@@ -3087,41 +3142,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
- /* Make sure we are not freeing a object from another
- * node to the array cache on this cpu.
- */
-#ifdef CONFIG_NUMA
- {
- struct slab *slabp;
- slabp = virt_to_slab(objp);
- if (unlikely(slabp->nodeid != numa_node_id())) {
- struct array_cache *alien = NULL;
- int nodeid = slabp->nodeid;
- struct kmem_list3 *l3;
-
- l3 = cachep->nodelists[numa_node_id()];
- STATS_INC_NODEFREES(cachep);
- if (l3->alien && l3->alien[nodeid]) {
- alien = l3->alien[nodeid];
- spin_lock(&alien->lock);
- if (unlikely(alien->avail == alien->limit)) {
- STATS_INC_ACOVERFLOW(cachep);
- __drain_alien_cache(cachep,
- alien, nodeid);
- }
- alien->entry[alien->avail++] = objp;
- spin_unlock(&alien->lock);
- } else {
- spin_lock(&(cachep->nodelists[nodeid])->
- list_lock);
- free_block(cachep, &objp, 1, nodeid);
- spin_unlock(&(cachep->nodelists[nodeid])->
- list_lock);
- }
- return;
- }
- }
-#endif
+ if (cache_free_alien(cachep, objp))
+ return;
+
if (likely(ac->avail < ac->limit)) {
STATS_INC_FREEHIT(cachep);
ac->entry[ac->avail++] = objp;
@@ -3254,26 +3277,10 @@ EXPORT_SYMBOL(kmalloc_node);
#endif
/**
- * kmalloc - allocate memory
+ * __do_kmalloc - allocate memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: the type of memory to allocate (see kmalloc).
* @caller: function caller for debug tracking of the caller
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
- *
- * Additionally, the %GFP_DMA flag may be set to indicate the memory
- * must be suitable for DMA. This can mean different things on different
- * platforms. For example, on i386, it means that the memory must come
- * from the first 16MB.
*/
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
void *caller)
@@ -3371,6 +3378,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
+ BUG_ON(virt_to_cache(objp) != cachep);
+
local_irq_save(flags);
__cache_free(cachep, objp);
local_irq_restore(flags);
@@ -3680,7 +3689,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
*/
static void cache_reap(void *unused)
{
- struct list_head *walk;
+ struct kmem_cache *searchp;
struct kmem_list3 *l3;
int node = numa_node_id();
@@ -3691,13 +3700,11 @@ static void cache_reap(void *unused)
return;
}
- list_for_each(walk, &cache_chain) {
- struct kmem_cache *searchp;
+ list_for_each_entry(searchp, &cache_chain, next) {
struct list_head *p;
int tofree;
struct slab *slabp;
- searchp = list_entry(walk, struct kmem_cache, next);
check_irq_on();
/*
@@ -3825,7 +3832,6 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
- struct list_head *q;
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
@@ -3846,15 +3852,13 @@ static int s_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each(q, &l3->slabs_full) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each(q, &l3->slabs_partial) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_partial, list) {
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error)
@@ -3862,8 +3866,7 @@ static int s_show(struct seq_file *m, void *p)
active_objs += slabp->inuse;
active_slabs++;
}
- list_for_each(q, &l3->slabs_free) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_free, list) {
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
num_slabs++;
@@ -3956,7 +3959,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
{
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
- struct list_head *p;
+ struct kmem_cache *cachep;
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
@@ -3975,10 +3978,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
- list_for_each(p, &cache_chain) {
- struct kmem_cache *cachep;
-
- cachep = list_entry(p, struct kmem_cache, next);
+ list_for_each_entry(cachep, &cache_chain, next) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
@@ -4080,7 +4080,6 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = p;
- struct list_head *q;
struct slab *slabp;
struct kmem_list3 *l3;
const char *name;
@@ -4105,14 +4104,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on();
spin_lock_irq(&l3->list_lock);
- list_for_each(q, &l3->slabs_full) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_full, list)
handle_slab(n, cachep, slabp);
- }
- list_for_each(q, &l3->slabs_partial) {
- slabp = list_entry(q, struct slab, list);
+ list_for_each_entry(slabp, &l3->slabs_partial, list)
handle_slab(n, cachep, slabp);
- }
spin_unlock_irq(&l3->list_lock);
}
name = cachep->name;
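Aside (illustrative, not part of the patch): the verify_redzone_free() helper factored out in the mm/slab.c hunks above distinguishes a double free (both guard words already INACTIVE) from a buffer overrun (any other mismatch). A userspace sketch of that decision, with guard values that are only stand-ins for the kernel's debug constants:

/* Sketch of the red-zone check; constants are illustrative. */
#include <stdio.h>

#define RED_INACTIVE 0x5A2CF071UL	/* stand-in for "object is free" */
#define RED_ACTIVE   0x170FC2A5UL	/* stand-in for "object is live" */

static void verify_redzone_free(const char *cache, unsigned long rz1,
				unsigned long rz2)
{
	if (rz1 == RED_ACTIVE && rz2 == RED_ACTIVE)
		return;		/* redzone ok */

	if (rz1 == RED_INACTIVE && rz2 == RED_INACTIVE)
		printf("%s: double free detected\n", cache);
	else
		printf("%s: memory outside object was overwritten\n", cache);
}

int main(void)
{
	verify_redzone_free("demo", RED_ACTIVE, RED_ACTIVE);	  /* clean free */
	verify_redzone_free("demo", RED_INACTIVE, RED_INACTIVE); /* double free */
	verify_redzone_free("demo", RED_ACTIVE, 0);		  /* overrun */
	return 0;
}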
diff --git a/mm/sparse.c b/mm/sparse.c
index 100040c0dfb..e0a3fe48aa3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -99,6 +99,22 @@ int __section_nr(struct mem_section* ms)
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
+/*
+ * During early boot, before section_mem_map is used for an actual
+ * mem_map, we use section_mem_map to store the section's NUMA
+ * node. This keeps us from having to use another data structure. The
+ * node information is cleared just before we store the real mem_map.
+ */
+static inline unsigned long sparse_encode_early_nid(int nid)
+{
+ return (nid << SECTION_NID_SHIFT);
+}
+
+static inline int sparse_early_nid(struct mem_section *section)
+{
+ return (section->section_mem_map >> SECTION_NID_SHIFT);
+}
+
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -113,7 +129,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
ms = __nr_to_section(section);
if (!ms->section_mem_map)
- ms->section_mem_map = SECTION_MARKED_PRESENT;
+ ms->section_mem_map = sparse_encode_early_nid(nid) |
+ SECTION_MARKED_PRESENT;
}
}
@@ -164,6 +181,7 @@ static int sparse_init_one_section(struct mem_section *ms,
if (!valid_section(ms))
return -EINVAL;
+ ms->section_mem_map &= ~SECTION_MAP_MASK;
ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
return 1;
@@ -172,8 +190,8 @@ static int sparse_init_one_section(struct mem_section *ms,
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
struct page *map;
- int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
struct mem_section *ms = __nr_to_section(pnum);
+ int nid = sparse_early_nid(ms);
map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
if (map)
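Aside (illustrative, not part of the patch): the mm/sparse.c hunks above stash a section's NUMA node id in section_mem_map during early boot and clear it again just before storing the real mem_map. A userspace sketch of that double use of one word; the shift and flag layout here are assumptions for illustration only.

/* Sketch of encoding a nid in section_mem_map before the map exists. */
#include <stdio.h>

#define SECTION_MARKED_PRESENT	0x1UL
#define SECTION_MAP_MASK	(~0x3UL)	/* low bits hold flags */
#define SECTION_NID_SHIFT	2

static unsigned long sparse_encode_early_nid(int nid)
{
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static int sparse_early_nid(unsigned long section_mem_map)
{
	return (int)(section_mem_map >> SECTION_NID_SHIFT);
}

int main(void)
{
	unsigned long section_mem_map;
	unsigned long real_mem_map = 0x12340000UL;	/* pretend pointer */

	/* memory_present(): remember the node until a mem_map is allocated */
	section_mem_map = sparse_encode_early_nid(3) | SECTION_MARKED_PRESENT;
	printf("early nid = %d\n", sparse_early_nid(section_mem_map));

	/* sparse_init_one_section(): drop the nid, keep flags, store the map */
	section_mem_map &= ~SECTION_MAP_MASK;
	section_mem_map |= real_mem_map & SECTION_MAP_MASK;
	printf("map = %#lx, flags = %#lx\n",
	       section_mem_map & SECTION_MAP_MASK,
	       section_mem_map & ~SECTION_MAP_MASK);
	return 0;
}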
diff --git a/mm/swap.c b/mm/swap.c
index 88895c249bc..03ae2076f92 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -480,48 +480,6 @@ static int cpu_swap_callback(struct notifier_block *nfb,
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
- long count;
- long *pcount;
- int cpu = get_cpu();
-
- pcount = per_cpu_ptr(fbc->counters, cpu);
- count = *pcount + amount;
- if (count >= FBC_BATCH || count <= -FBC_BATCH) {
- spin_lock(&fbc->lock);
- fbc->count += count;
- *pcount = 0;
- spin_unlock(&fbc->lock);
- } else {
- *pcount = count;
- }
- put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
- long ret;
- int cpu;
-
- spin_lock(&fbc->lock);
- ret = fbc->count;
- for_each_possible_cpu(cpu) {
- long *pcount = per_cpu_ptr(fbc->counters, cpu);
- ret += *pcount;
- }
- spin_unlock(&fbc->lock);
- return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
/*
* Perform any setup for the swap system
*/
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e5fd5385f0c..cc367f7e75d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -395,6 +395,9 @@ void free_swap_and_cache(swp_entry_t entry)
struct swap_info_struct * p;
struct page *page = NULL;
+ if (is_migration_entry(entry))
+ return;
+
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, swp_offset(entry)) == 1) {
@@ -615,15 +618,6 @@ static int unuse_mm(struct mm_struct *mm,
return 0;
}
-#ifdef CONFIG_MIGRATION
-int remove_vma_swap(struct vm_area_struct *vma, struct page *page)
-{
- swp_entry_t entry = { .val = page_private(page) };
-
- return unuse_vma(vma, entry, page);
-}
-#endif
-
/*
* Scan swap_map from current position to next entry still in use.
* Recycle to start on reaching the end, returning 0 when empty.
@@ -716,7 +710,6 @@ static int try_to_unuse(unsigned int type)
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
-again:
page = read_swap_cache_async(entry, NULL, 0);
if (!page) {
/*
@@ -751,12 +744,6 @@ again:
wait_on_page_locked(page);
wait_on_page_writeback(page);
lock_page(page);
- if (!PageSwapCache(page)) {
- /* Page migration has occured */
- unlock_page(page);
- page_cache_release(page);
- goto again;
- }
wait_on_page_writeback(page);
/*
@@ -785,10 +772,8 @@ again:
while (*swap_map > 1 && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
- if (atomic_inc_return(&mm->mm_users) == 1) {
- atomic_dec(&mm->mm_users);
+ if (!atomic_inc_not_zero(&mm->mm_users))
continue;
- }
spin_unlock(&mmlist_lock);
mmput(prev_mm);
prev_mm = mm;
@@ -1407,19 +1392,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
if (!(p->flags & SWP_USED))
break;
error = -EPERM;
- /*
- * Test if adding another swap device is possible. There are
- * two limiting factors: 1) the number of bits for the swap
- * type swp_entry_t definition and 2) the number of bits for
- * the swap type in the swap ptes as defined by the different
- * architectures. To honor both limitations a swap entry
- * with swap offset 0 and swap type ~0UL is created, encoded
- * to a swap pte, decoded to a swp_entry_t again and finally
- * the swap type part is extracted. This will mask all bits
- * from the initial ~0UL that can't be encoded in either the
- * swp_entry_t or the architecture definition of a swap pte.
- */
- if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
+ if (type >= MAX_SWAPFILES) {
spin_unlock(&swap_lock);
goto out;
}
@@ -1504,8 +1477,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
error = -EINVAL;
goto bad_swap;
}
- page = read_cache_page(mapping, 0,
- (filler_t *)mapping->a_ops->readpage, swap_file);
+ page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto bad_swap;
@@ -1709,6 +1681,9 @@ int swap_duplicate(swp_entry_t entry)
unsigned long offset, type;
int result = 0;
+ if (is_migration_entry(entry))
+ return 1;
+
type = swp_type(entry);
if (type >= nr_swapfiles)
goto bad_file;
diff --git a/mm/truncate.c b/mm/truncate.c
index 6cb3fff25f6..cf1b015df4a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -230,14 +230,24 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
+ pgoff_t index;
+ int lock_failed;
- if (TestSetPageLocked(page)) {
- next++;
- continue;
- }
- if (page->index > next)
- next = page->index;
+ lock_failed = TestSetPageLocked(page);
+
+ /*
+ * We really shouldn't be looking at the ->index of an
+ * unlocked page. But we're not allowed to lock these
+ * pages. So we rely upon nobody altering the ->index
+ * of this (pinned-by-us) page.
+ */
+ index = page->index;
+ if (index > next)
+ next = index;
next++;
+ if (lock_failed)
+ continue;
+
if (PageDirty(page) || PageWriteback(page))
goto unlock;
if (page_mapped(page))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e34e..35f8553f893 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -257,6 +257,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
}
/* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+ struct vm_struct *tmp;
+
+ for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+ if (tmp->addr == addr)
+ break;
+ }
+
+ return tmp;
+}
+
+/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
struct vm_struct **p, *tmp;
@@ -498,11 +511,33 @@ EXPORT_SYMBOL(__vmalloc);
*/
void *vmalloc(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
/**
+ * vmalloc_user - allocate virtually contiguous memory which has
+ * been zeroed so it can be mapped to userspace without
+ * leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+ struct vm_struct *area;
+ void *ret;
+
+ ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ write_lock(&vmlist_lock);
+ area = __find_vm_area(ret);
+ area->flags |= VM_USERMAP;
+ write_unlock(&vmlist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+/**
* vmalloc_node - allocate memory on a specific node
*
* @size: allocation size
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
*/
void *vmalloc_node(unsigned long size, int node)
{
- return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+ return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
}
EXPORT_SYMBOL(vmalloc_32);
+/**
+ * vmalloc_32_user - allocate virtually contiguous memory (32bit
+ * addressable) which is zeroed so it can be
+ * mapped to userspace without leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+ struct vm_struct *area;
+ void *ret;
+
+ ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ write_lock(&vmlist_lock);
+ area = __find_vm_area(ret);
+ area->flags |= VM_USERMAP;
+ write_unlock(&vmlist_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
long vread(char *buf, char *addr, unsigned long count)
{
struct vm_struct *tmp;
@@ -630,3 +687,64 @@ finished:
read_unlock(&vmlist_lock);
return buf - buf_start;
}
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ *
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ * @returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * those criteria aren't met.
+ *
+ * Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+{
+ struct vm_struct *area;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int ret;
+
+ if ((PAGE_SIZE-1) & (unsigned long)addr)
+ return -EINVAL;
+
+ read_lock(&vmlist_lock);
+ area = __find_vm_area(addr);
+ if (!area)
+ goto out_einval_locked;
+
+ if (!(area->flags & VM_USERMAP))
+ goto out_einval_locked;
+
+ if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+ goto out_einval_locked;
+ read_unlock(&vmlist_lock);
+
+ addr += pgoff << PAGE_SHIFT;
+ do {
+ struct page *page = vmalloc_to_page(addr);
+ ret = vm_insert_page(vma, uaddr, page);
+ if (ret)
+ return ret;
+
+ uaddr += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+ /* Prevent "things" like memory migration? VM_flags need a cleanup... */
+ vma->vm_flags |= VM_RESERVED;
+
+ return ret;
+
+out_einval_locked:
+ read_unlock(&vmlist_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
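Aside (illustrative, not part of the patch): the size check in remap_vmalloc_range() above subtracts PAGE_SIZE from area->size because a vmalloc area includes one trailing guard page; the requested window (vma length plus the page offset into the buffer) must fit in what remains. A tiny userspace sketch of that arithmetic:

/* Sketch of the remap_vmalloc_range() bounds check. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int range_ok(unsigned long area_size, unsigned long usize,
		    unsigned long pgoff)
{
	/* area_size counts the guard page; usable space is one page less */
	return usize + (pgoff << PAGE_SHIFT) <= area_size - PAGE_SIZE;
}

int main(void)
{
	unsigned long area = 16 * PAGE_SIZE + PAGE_SIZE;	/* 16 pages + guard */

	printf("%d\n", range_ok(area, 16 * PAGE_SIZE, 0));	/* fits     -> 1 */
	printf("%d\n", range_ok(area, 16 * PAGE_SIZE, 1));	/* too long -> 0 */
	return 0;
}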
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 440a733fe2e..72babac71de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -61,6 +61,8 @@ struct scan_control {
* In this context, it doesn't matter that we scan the
* whole list at once. */
int swap_cluster_max;
+
+ int swappiness;
};
/*
@@ -108,7 +110,7 @@ struct shrinker {
* From 0 .. 100. Higher means more swappy.
*/
int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages; /* The total number of pages which the VM controls */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
@@ -288,11 +290,23 @@ static void handle_write_error(struct address_space *mapping,
unlock_page(page);
}
+/* possible outcome of pageout() */
+typedef enum {
+ /* failed to write page out, page is locked */
+ PAGE_KEEP,
+ /* move page to the active list, page is locked */
+ PAGE_ACTIVATE,
+ /* page has been sent to the disk successfully, page is unlocked */
+ PAGE_SUCCESS,
+ /* page is clean and locked */
+ PAGE_CLEAN,
+} pageout_t;
+
/*
* pageout is called by shrink_page_list() for each dirty page.
* Calls ->writepage().
*/
-pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping)
{
/*
* If the page is dirty, only perform writeback if that write
@@ -337,6 +351,8 @@ pageout_t pageout(struct page *page, struct address_space *mapping)
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = SWAP_CLUSTER_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
.nonblocking = 1,
.for_reclaim = 1,
};
@@ -727,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* how much memory
* is mapped.
*/
- mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+ mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
/*
* Now decide how much we really want to unmap some pages. The
@@ -741,7 +757,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* A 100% value of vm_swappiness overrides this algorithm
* altogether.
*/
- swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+ swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
/*
* Now use this metric to decide whether to start moving mapped
@@ -957,6 +973,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
.may_writepage = !laptop_mode,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.may_swap = 1,
+ .swappiness = vm_swappiness,
};
inc_page_state(allocstall);
@@ -1021,10 +1038,6 @@ out:
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at pages_high.
*
- * If `nr_pages' is non-zero then it is the number of pages which are to be
- * reclaimed, regardless of the zone occupancies. This is a software suspend
- * special.
- *
* Returns the number of pages which were actually freed.
*
* There is special handling here for zones which are full of pinned pages.
@@ -1042,10 +1055,8 @@ out:
* the page allocator fallback scheme to ensure that aging of pages is balanced
* across the zones.
*/
-static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
- int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
{
- unsigned long to_free = nr_pages;
int all_zones_ok;
int priority;
int i;
@@ -1055,7 +1066,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_swap = 1,
- .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
+ .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .swappiness = vm_swappiness,
};
loop_again:
@@ -1082,31 +1094,26 @@ loop_again:
all_zones_ok = 1;
- if (nr_pages == 0) {
- /*
- * Scan in the highmem->dma direction for the highest
- * zone which needs scanning
- */
- for (i = pgdat->nr_zones - 1; i >= 0; i--) {
- struct zone *zone = pgdat->node_zones + i;
+ /*
+ * Scan in the highmem->dma direction for the highest
+ * zone which needs scanning
+ */
+ for (i = pgdat->nr_zones - 1; i >= 0; i--) {
+ struct zone *zone = pgdat->node_zones + i;
- if (!populated_zone(zone))
- continue;
+ if (!populated_zone(zone))
+ continue;
- if (zone->all_unreclaimable &&
- priority != DEF_PRIORITY)
- continue;
+ if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ continue;
- if (!zone_watermark_ok(zone, order,
- zone->pages_high, 0, 0)) {
- end_zone = i;
- goto scan;
- }
+ if (!zone_watermark_ok(zone, order, zone->pages_high,
+ 0, 0)) {
+ end_zone = i;
+ goto scan;
}
- goto out;
- } else {
- end_zone = pgdat->nr_zones - 1;
}
+ goto out;
scan:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
@@ -1133,11 +1140,9 @@ scan:
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue;
- if (nr_pages == 0) { /* Not software suspend */
- if (!zone_watermark_ok(zone, order,
- zone->pages_high, end_zone, 0))
- all_zones_ok = 0;
- }
+ if (!zone_watermark_ok(zone, order, zone->pages_high,
+ end_zone, 0))
+ all_zones_ok = 0;
zone->temp_priority = priority;
if (zone->prev_priority > priority)
zone->prev_priority = priority;
@@ -1162,8 +1167,6 @@ scan:
total_scanned > nr_reclaimed + nr_reclaimed / 2)
sc.may_writepage = 1;
}
- if (nr_pages && to_free > nr_reclaimed)
- continue; /* swsusp: need to do more work */
if (all_zones_ok)
break; /* kswapd: all done */
/*
@@ -1179,7 +1182,7 @@ scan:
* matches the direct reclaim path behaviour in terms of impact
* on zone->*_priority.
*/
- if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
+ if (nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
}
out:
@@ -1261,7 +1264,7 @@ static int kswapd(void *p)
}
finish_wait(&pgdat->kswapd_wait, &wait);
- balance_pgdat(pgdat, 0, order);
+ balance_pgdat(pgdat, order);
}
return 0;
}
@@ -1290,35 +1293,154 @@ void wakeup_kswapd(struct zone *zone, int order)
#ifdef CONFIG_PM
/*
- * Try to free `nr_pages' of memory, system-wide. Returns the number of freed
- * pages.
+ * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
+ * from LRU lists system-wide, for given pass and priority, and returns the
+ * number of reclaimed pages
+ *
+ * For pass > 3 we also try to shrink the LRU lists that contain a few pages
+ */
+static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
+ int prio, struct scan_control *sc)
+{
+ struct zone *zone;
+ unsigned long nr_to_scan, ret = 0;
+
+ for_each_zone(zone) {
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+ continue;
+
+ /* For pass = 0 we don't shrink the active list */
+ if (pass > 0) {
+ zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+ if (zone->nr_scan_active >= nr_pages || pass > 3) {
+ zone->nr_scan_active = 0;
+ nr_to_scan = min(nr_pages, zone->nr_active);
+ shrink_active_list(nr_to_scan, zone, sc);
+ }
+ }
+
+ zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+ if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
+ zone->nr_scan_inactive = 0;
+ nr_to_scan = min(nr_pages, zone->nr_inactive);
+ ret += shrink_inactive_list(nr_to_scan, zone, sc);
+ if (ret >= nr_pages)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * freed pages.
+ *
+ * Rather than trying to age LRUs the aim is to preserve the overall
+ * LRU order by reclaiming preferentially
+ * inactive > active > active referenced > active mapped
*/
unsigned long shrink_all_memory(unsigned long nr_pages)
{
- pg_data_t *pgdat;
- unsigned long nr_to_free = nr_pages;
+ unsigned long lru_pages, nr_slab;
unsigned long ret = 0;
- unsigned retry = 2;
- struct reclaim_state reclaim_state = {
- .reclaimed_slab = 0,
+ int pass;
+ struct reclaim_state reclaim_state;
+ struct zone *zone;
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .may_swap = 0,
+ .swap_cluster_max = nr_pages,
+ .may_writepage = 1,
+ .swappiness = vm_swappiness,
};
current->reclaim_state = &reclaim_state;
-repeat:
- for_each_online_pgdat(pgdat) {
- unsigned long freed;
- freed = balance_pgdat(pgdat, nr_to_free, 0);
- ret += freed;
- nr_to_free -= freed;
- if ((long)nr_to_free <= 0)
+ lru_pages = 0;
+ for_each_zone(zone)
+ lru_pages += zone->nr_active + zone->nr_inactive;
+
+ nr_slab = read_page_state(nr_slab);
+ /* If slab caches are huge, it's better to hit them first */
+ while (nr_slab >= lru_pages) {
+ reclaim_state.reclaimed_slab = 0;
+ shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+ if (!reclaim_state.reclaimed_slab)
break;
+
+ ret += reclaim_state.reclaimed_slab;
+ if (ret >= nr_pages)
+ goto out;
+
+ nr_slab -= reclaim_state.reclaimed_slab;
}
- if (retry-- && ret < nr_pages) {
- blk_congestion_wait(WRITE, HZ/5);
- goto repeat;
+
+ /*
+ * We try to shrink LRUs in 5 passes:
+ * 0 = Reclaim from inactive_list only
+ * 1 = Reclaim from active list but don't reclaim mapped
+ * 2 = 2nd pass of type 1
+ * 3 = Reclaim mapped (normal reclaim)
+ * 4 = 2nd pass of type 3
+ */
+ for (pass = 0; pass < 5; pass++) {
+ int prio;
+
+ /* Needed for shrinking slab caches later on */
+ if (!lru_pages)
+ for_each_zone(zone) {
+ lru_pages += zone->nr_active;
+ lru_pages += zone->nr_inactive;
+ }
+
+ /* Force reclaiming mapped pages in passes 3 and 4 */
+ if (pass > 2) {
+ sc.may_swap = 1;
+ sc.swappiness = 100;
+ }
+
+ for (prio = DEF_PRIORITY; prio >= 0; prio--) {
+ unsigned long nr_to_scan = nr_pages - ret;
+
+ sc.nr_mapped = read_page_state(nr_mapped);
+ sc.nr_scanned = 0;
+
+ ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
+ if (ret >= nr_pages)
+ goto out;
+
+ reclaim_state.reclaimed_slab = 0;
+ shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+ ret += reclaim_state.reclaimed_slab;
+ if (ret >= nr_pages)
+ goto out;
+
+ if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
+ blk_congestion_wait(WRITE, HZ / 10);
+ }
+
+ lru_pages = 0;
}
+
+ /*
+ * If ret = 0, we could not shrink LRUs, but there may be something
+ * in slab caches
+ */
+ if (!ret)
+ do {
+ reclaim_state.reclaimed_slab = 0;
+ shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+ ret += reclaim_state.reclaimed_slab;
+ } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+
+out:
current->reclaim_state = NULL;
+
return ret;
}
#endif
@@ -1360,7 +1482,6 @@ static int __init kswapd_init(void)
pgdat->kswapd = find_task_by_pid(pid);
read_unlock(&tasklist_lock);
}
- total_memory = nr_free_pagecache_pages();
hotcpu_notifier(cpu_callback, 0);
return 0;
}
@@ -1416,6 +1537,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.swap_cluster_max = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
+ .swappiness = vm_swappiness,
};
disable_swap_token();
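
Taken together, the vmscan.c hunks above drop the old nr_pages argument from balance_pgdat() and give swsusp its own reclaim loop. A minimal outline of what shrink_all_memory() now does (slab shrinking, congestion waits and the final slab-only fallback omitted; shrink_all_memory_outline is an illustrative name, not a symbol in the patch):

	static unsigned long shrink_all_memory_outline(unsigned long nr_pages,
						       struct scan_control *sc)
	{
		unsigned long ret = 0;
		int pass, prio;

		/* Five passes: 0 = inactive lists only, 1-2 = also the active
		 * lists (unmapped pages), 3-4 = mapped pages as well. */
		for (pass = 0; pass < 5; pass++) {
			if (pass > 2) {
				sc->may_swap = 1;
				sc->swappiness = 100;
			}
			/* Within a pass, raise the pressure step by step. */
			for (prio = DEF_PRIORITY; prio >= 0; prio--) {
				ret += shrink_all_zones(nr_pages - ret,
							prio, pass, sc);
				if (ret >= nr_pages)
					return ret;
			}
		}
		return ret;
	}
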
diff --git a/net/core/dev.c b/net/core/dev.c
index 9b8f0f22c81..ea2469398bd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3097,7 +3097,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
- struct list_head list = LIST_HEAD_INIT(list);
+ struct list_head list;
/* Need to guard against multiple cpu's getting out of order. */
mutex_lock(&net_todo_run_mutex);
@@ -3112,9 +3112,9 @@ void netdev_run_todo(void)
/* Snapshot list, allow later requests */
spin_lock(&net_todo_list_lock);
- list_splice_init(&net_todo_list, &list);
+ list_replace_init(&net_todo_list, &list);
spin_unlock(&net_todo_list_lock);
-
+
while (!list_empty(&list)) {
struct net_device *dev
= list_entry(list.next, struct net_device, todo_list);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 646937cc2d8..0f37266411b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -91,11 +91,10 @@ static void rfc2863_policy(struct net_device *dev)
/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
- LIST_HEAD(head);
- struct list_head *n, *next;
+ struct list_head head, *n, *next;
spin_lock_irq(&lweventlist_lock);
- list_splice_init(&lweventlist, &head);
+ list_replace_init(&lweventlist, &head);
spin_unlock_irq(&lweventlist_lock);
list_for_each_safe(n, next, &head) {
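
Both call sites above used to splice the shared list onto a statically initialized on-stack head. list_replace_init() makes the LIST_HEAD()/LIST_HEAD_INIT() initializers unnecessary because it unconditionally rewrites the destination head and leaves the source as a valid empty list, even when the source was already empty; per include/linux/list.h it is roughly equivalent to:

	static inline void list_replace_init(struct list_head *old,
					     struct list_head *new)
	{
		/* 'new' takes over whatever 'old' was heading ... */
		new->next = old->next;
		new->next->prev = new;
		new->prev = old->prev;
		new->prev->next = new;
		/* ... and 'old' is reinitialized to an empty list. */
		INIT_LIST_HEAD(old);
	}
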
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 7e096ba8454..859e3359fcd 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -26,7 +26,7 @@ config INET_DCCP_DIAG
config IP_DCCP_ACKVEC
depends on IP_DCCP
- def_bool N
+ bool
source "net/dccp/ccids/Kconfig"
diff --git a/net/socket.c b/net/socket.c
index 02948b622bd..565f5e8d119 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -335,10 +335,11 @@ static struct super_operations sockfs_ops = {
.statfs = simple_statfs,
};
-static struct super_block *sockfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sockfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
+ return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
+ mnt);
}
static struct vfsmount *sock_mnt __read_mostly;
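
This is the pattern repeated in the remaining filesystem hunks: ->get_sb() now returns 0 or a -errno and attaches its result to the vfsmount it is handed instead of returning a super_block pointer, and get_sb_single()/get_sb_pseudo() take a matching mnt argument. In outline (example_get_sb and example_fill_super are placeholder names):

	static int example_get_sb(struct file_system_type *fs_type, int flags,
				  const char *dev_name, void *data,
				  struct vfsmount *mnt)
	{
		/* the helper fills in 'mnt' and returns 0 or -errno */
		return get_sb_single(fs_type, flags, data, example_fill_super,
				     mnt);
	}
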
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index cc673dd8433..8241fa72680 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -815,11 +815,11 @@ out:
return -ENOMEM;
}
-static struct super_block *
+static int
rpc_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, rpc_fill_super);
+ return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
}
static struct file_system_type rpc_pipe_fs_type = {
diff --git a/security/dummy.c b/security/dummy.c
index 6de4a4a5eb1..c3c5493581e 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -191,7 +191,7 @@ static int dummy_sb_kern_mount (struct super_block *sb, void *data)
return 0;
}
-static int dummy_sb_statfs (struct super_block *sb)
+static int dummy_sb_statfs (struct dentry *dentry)
{
return 0;
}
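
The sb_statfs change follows the same direction: the hook is now handed the dentry being statfs'd rather than a bare superblock, and a module that still needs the superblock recovers it via dentry->d_sb, as the SELinux conversion further down does. A sketch (example_superblock_has_perm is a placeholder):

	static int example_sb_statfs(struct dentry *dentry)
	{
		/* checks are made relative to the dentry; the superblock,
		 * where still needed, is simply dentry->d_sb */
		return example_superblock_has_perm(dentry->d_sb);
	}
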
@@ -516,6 +516,11 @@ static int dummy_task_setnice (struct task_struct *p, int nice)
return 0;
}
+static int dummy_task_setioprio (struct task_struct *p, int ioprio)
+{
+ return 0;
+}
+
static int dummy_task_setrlimit (unsigned int resource, struct rlimit *new_rlim)
{
return 0;
@@ -532,6 +537,11 @@ static int dummy_task_getscheduler (struct task_struct *p)
return 0;
}
+static int dummy_task_movememory (struct task_struct *p)
+{
+ return 0;
+}
+
static int dummy_task_wait (struct task_struct *p)
{
return 0;
@@ -972,9 +982,11 @@ void security_fixup_ops (struct security_operations *ops)
set_to_dummy_if_null(ops, task_getsid);
set_to_dummy_if_null(ops, task_setgroups);
set_to_dummy_if_null(ops, task_setnice);
+ set_to_dummy_if_null(ops, task_setioprio);
set_to_dummy_if_null(ops, task_setrlimit);
set_to_dummy_if_null(ops, task_setscheduler);
set_to_dummy_if_null(ops, task_getscheduler);
+ set_to_dummy_if_null(ops, task_movememory);
set_to_dummy_if_null(ops, task_wait);
set_to_dummy_if_null(ops, task_kill);
set_to_dummy_if_null(ops, task_prctl);
diff --git a/security/inode.c b/security/inode.c
index 0f77b022366..e6fc29ac856 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -135,11 +135,11 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return simple_fill_super(sb, SECURITYFS_MAGIC, files);
}
-static struct super_block *get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, fill_super);
+ return get_sb_single(fs_type, flags, data, fill_super, mnt);
}
static struct file_system_type fs_type = {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 524915dfda6..79c16e31c88 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1903,13 +1903,13 @@ static int selinux_sb_kern_mount(struct super_block *sb, void *data)
return superblock_has_perm(current, sb, FILESYSTEM__MOUNT, &ad);
}
-static int selinux_sb_statfs(struct super_block *sb)
+static int selinux_sb_statfs(struct dentry *dentry)
{
struct avc_audit_data ad;
AVC_AUDIT_DATA_INIT(&ad,FS);
- ad.u.fs.dentry = sb->s_root;
- return superblock_has_perm(current, sb, FILESYSTEM__GETATTR, &ad);
+ ad.u.fs.dentry = dentry->d_sb->s_root;
+ return superblock_has_perm(current, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
static int selinux_mount(char * dev_name,
@@ -2645,6 +2645,11 @@ static int selinux_task_setnice(struct task_struct *p, int nice)
return task_has_perm(current,p, PROCESS__SETSCHED);
}
+static int selinux_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return task_has_perm(current, p, PROCESS__SETSCHED);
+}
+
static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
{
struct rlimit *old_rlim = current->signal->rlim + resource;
@@ -2674,6 +2679,11 @@ static int selinux_task_getscheduler(struct task_struct *p)
return task_has_perm(current, p, PROCESS__GETSCHED);
}
+static int selinux_task_movememory(struct task_struct *p)
+{
+ return task_has_perm(current, p, PROCESS__SETSCHED);
+}
+
static int selinux_task_kill(struct task_struct *p, struct siginfo *info, int sig)
{
u32 perm;
@@ -4383,9 +4393,11 @@ static struct security_operations selinux_ops = {
.task_getsid = selinux_task_getsid,
.task_setgroups = selinux_task_setgroups,
.task_setnice = selinux_task_setnice,
+ .task_setioprio = selinux_task_setioprio,
.task_setrlimit = selinux_task_setrlimit,
.task_setscheduler = selinux_task_setscheduler,
.task_getscheduler = selinux_task_getscheduler,
+ .task_movememory = selinux_task_movememory,
.task_kill = selinux_task_kill,
.task_wait = selinux_task_wait,
.task_prctl = selinux_task_prctl,
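
The two new hooks follow the usual LSM shape: the dummy module permits everything, and SELinux (above) maps both task_setioprio and task_movememory onto the existing PROCESS__SETSCHED permission. Assuming the customary security_*() wrappers in include/linux/security.h (their definitions are not part of this diff), callers reach the hooks roughly like this:

	static inline int security_task_setioprio(struct task_struct *p,
						  int ioprio)
	{
		return security_ops->task_setioprio(p, ioprio);
	}

	static inline int security_task_movememory(struct task_struct *p)
	{
		return security_ops->task_movememory(p);
	}
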
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 2e73d3279f2..7029bbc9bef 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1345,10 +1345,11 @@ err:
goto out;
}
-static struct super_block *sel_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sel_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_single(fs_type, flags, data, sel_fill_super);
+ return get_sb_single(fs_type, flags, data, sel_fill_super, mnt);
}
static struct file_system_type sel_fs_type = {
diff --git a/sound/oss/au1550_ac97.c b/sound/oss/au1550_ac97.c
index c1168fae6be..9011abe241a 100644
--- a/sound/oss/au1550_ac97.c
+++ b/sound/oss/au1550_ac97.c
@@ -57,9 +57,9 @@
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/hardirq.h>
-#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx.h>
#undef OSS_DOCUMENTED_MIXER_SEMANTICS
diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c
index 53881bc91bb..994c71e986e 100644
--- a/sound/oss/cs46xx.c
+++ b/sound/oss/cs46xx.c
@@ -147,7 +147,7 @@
* that should be printed on any released driver.
*/
#if CSDEBUG
-#define CS_DBGOUT(mask,level,x) if((cs_debuglevel >= (level)) && ((mask) & cs_debugmask)) {x;}
+#define CS_DBGOUT(mask,level,x) if ((cs_debuglevel >= (level)) && ((mask) & cs_debugmask)) {x;}
#else
#define CS_DBGOUT(mask,level,x)
#endif
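
With CSDEBUG set, a CS_DBGOUT() call is a plain runtime check against the two module parameters defined just below; with CSDEBUG unset it compiles away. For example, CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_open()+\n")) effectively becomes:

	if ((cs_debuglevel >= 2) && (CS_FUNCTION & cs_debugmask)) {
		printk("cs46xx: cs_open()+\n");
	}
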
@@ -175,19 +175,19 @@
#define CS_IOCTL_CMD_RESUME 0x2 // resume
#if CSDEBUG
-static unsigned long cs_debuglevel=1; /* levels range from 1-9 */
+static unsigned long cs_debuglevel = 1; /* levels range from 1-9 */
module_param(cs_debuglevel, ulong, 0644);
-static unsigned long cs_debugmask=CS_INIT | CS_ERROR; /* use CS_DBGOUT with various mask values */
+static unsigned long cs_debugmask = CS_INIT | CS_ERROR; /* use CS_DBGOUT with various mask values */
module_param(cs_debugmask, ulong, 0644);
#endif
static unsigned long hercules_egpio_disable; /* if non-zero set all EGPIO to 0 */
module_param(hercules_egpio_disable, ulong, 0);
-static unsigned long initdelay=700; /* PM delay in millisecs */
+static unsigned long initdelay = 700; /* PM delay in millisecs */
module_param(initdelay, ulong, 0);
-static unsigned long powerdown=-1; /* turn on/off powerdown processing in driver */
+static unsigned long powerdown = -1; /* turn on/off powerdown processing in driver */
module_param(powerdown, ulong, 0);
#define DMABUF_DEFAULTORDER 3
-static unsigned long defaultorder=DMABUF_DEFAULTORDER;
+static unsigned long defaultorder = DMABUF_DEFAULTORDER;
module_param(defaultorder, ulong, 0);
static int external_amp;
@@ -200,8 +200,8 @@ module_param(thinkpad, bool, 0);
* powerdown. also set thinkpad to 1 to disable powerdown,
* but also to enable the clkrun functionality.
*/
-static unsigned cs_powerdown=1;
-static unsigned cs_laptop_wait=1;
+static unsigned cs_powerdown = 1;
+static unsigned cs_laptop_wait = 1;
/* An instance of the 4610 channel */
struct cs_channel
@@ -319,7 +319,7 @@ struct cs_card {
atomic_t mixer_use_cnt;
/* PCI device stuff */
- struct pci_dev * pci_dev;
+ struct pci_dev *pci_dev;
struct list_head list;
unsigned int pctl, cctl; /* Hardware DMA flag sets */
@@ -384,7 +384,7 @@ struct cs_card {
static int cs_open_mixdev(struct inode *inode, struct file *file);
static int cs_release_mixdev(struct inode *inode, struct file *file);
static int cs_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
static int cs_hardware_init(struct cs_card *card);
static int cs46xx_powerup(struct cs_card *card, unsigned int type);
static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspendflag);
@@ -423,8 +423,7 @@ static void printioctl(unsigned int x)
[SOUND_MIXER_VOLUME] = 9 /* Master Volume */
};
- switch(x)
- {
+ switch (x) {
case SOUND_MIXER_CS_GETDBGMASK:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CS_GETDBGMASK: ") );
break;
@@ -521,7 +520,6 @@ static void printioctl(unsigned int x)
case SOUND_PCM_READ_FILTER:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_FILTER: ") );
break;
-
case SOUND_MIXER_PRIVATE1:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE1: ") );
break;
@@ -543,10 +541,8 @@ static void printioctl(unsigned int x)
case SOUND_OLD_MIXER_INFO:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_OLD_MIXER_INFO: ") );
break;
-
default:
- switch (_IOC_NR(x))
- {
+ switch (_IOC_NR(x)) {
case SOUND_MIXER_VOLUME:
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_VOLUME: ") );
break;
@@ -579,14 +575,11 @@ static void printioctl(unsigned int x)
break;
default:
i = _IOC_NR(x);
- if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
- {
+ if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i])) {
CS_DBGOUT(CS_IOCTL, 4, printk("UNKNOWN IOCTL: 0x%.8x NR=%d ",x,i) );
- }
- else
- {
+ } else {
CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_IOCTL AC9x: 0x%.8x NR=%d ",
- x,i) );
+ x,i));
}
break;
}
@@ -601,22 +594,22 @@ static void printioctl(unsigned int x)
static void cs461x_poke(struct cs_card *codec, unsigned long reg, unsigned int val)
{
- writel(val, codec->ba1.idx[(reg >> 16) & 3]+(reg&0xffff));
+ writel(val, codec->ba1.idx[(reg >> 16) & 3] + (reg & 0xffff));
}
static unsigned int cs461x_peek(struct cs_card *codec, unsigned long reg)
{
- return readl(codec->ba1.idx[(reg >> 16) & 3]+(reg&0xffff));
+ return readl(codec->ba1.idx[(reg >> 16) & 3] + (reg & 0xffff));
}
static void cs461x_pokeBA0(struct cs_card *codec, unsigned long reg, unsigned int val)
{
- writel(val, codec->ba0+reg);
+ writel(val, codec->ba0 + reg);
}
static unsigned int cs461x_peekBA0(struct cs_card *codec, unsigned long reg)
{
- return readl(codec->ba0+reg);
+ return readl(codec->ba0 + reg);
}
@@ -625,26 +618,26 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 data);
static struct cs_channel *cs_alloc_pcm_channel(struct cs_card *card)
{
- if(card->channel[1].used==1)
+ if (card->channel[1].used == 1)
return NULL;
- card->channel[1].used=1;
- card->channel[1].num=1;
+ card->channel[1].used = 1;
+ card->channel[1].num = 1;
return &card->channel[1];
}
static struct cs_channel *cs_alloc_rec_pcm_channel(struct cs_card *card)
{
- if(card->channel[0].used==1)
+ if (card->channel[0].used == 1)
return NULL;
- card->channel[0].used=1;
- card->channel[0].num=0;
+ card->channel[0].used = 1;
+ card->channel[0].num = 0;
return &card->channel[0];
}
static void cs_free_pcm_channel(struct cs_card *card, int channel)
{
card->channel[channel].state = NULL;
- card->channel[channel].used=0;
+ card->channel[channel].used = 0;
}
/*
@@ -655,15 +648,15 @@ static void cs_free_pcm_channel(struct cs_card *card, int channel)
*/
static void cs_set_divisor(struct dmabuf *dmabuf)
{
- if(dmabuf->type == CS_TYPE_DAC)
+ if (dmabuf->type == CS_TYPE_DAC)
dmabuf->divisor = 1;
- else if( !(dmabuf->fmt & CS_FMT_STEREO) &&
+ else if (!(dmabuf->fmt & CS_FMT_STEREO) &&
(dmabuf->fmt & CS_FMT_16BIT))
dmabuf->divisor = 2;
- else if( (dmabuf->fmt & CS_FMT_STEREO) &&
+ else if ((dmabuf->fmt & CS_FMT_STEREO) &&
!(dmabuf->fmt & CS_FMT_16BIT))
dmabuf->divisor = 2;
- else if( !(dmabuf->fmt & CS_FMT_STEREO) &&
+ else if (!(dmabuf->fmt & CS_FMT_STEREO) &&
!(dmabuf->fmt & CS_FMT_16BIT))
dmabuf->divisor = 4;
else
@@ -680,13 +673,12 @@ static void cs_set_divisor(struct dmabuf *dmabuf)
*/
static void cs_mute(struct cs_card *card, int state)
{
- struct ac97_codec *dev=card->ac97_codec[0];
+ struct ac97_codec *dev = card->ac97_codec[0];
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: cs_mute()+ %s\n",
- (state == CS_TRUE) ? "Muting" : "UnMuting") );
+ (state == CS_TRUE) ? "Muting" : "UnMuting"));
- if(state == CS_TRUE)
- {
+ if (state == CS_TRUE) {
/*
* fix pops when powering up on thinkpads
*/
@@ -703,9 +695,7 @@ static void cs_mute(struct cs_card *card, int state)
cs_ac97_set(dev, (u8)BA0_AC97_HEADPHONE_VOLUME, 0x8000);
cs_ac97_set(dev, (u8)BA0_AC97_MASTER_VOLUME_MONO, 0x8000);
cs_ac97_set(dev, (u8)BA0_AC97_PCM_OUT_VOLUME, 0x8000);
- }
- else
- {
+ } else {
cs_ac97_set(dev, (u8)BA0_AC97_MASTER_VOLUME, card->pm.u32AC97_master_volume);
cs_ac97_set(dev, (u8)BA0_AC97_HEADPHONE_VOLUME, card->pm.u32AC97_headphone_volume);
cs_ac97_set(dev, (u8)BA0_AC97_MASTER_VOLUME_MONO, card->pm.u32AC97_master_volume_mono);
@@ -757,7 +747,6 @@ static unsigned int cs_set_dac_rate(struct cs_state * state, unsigned int rate)
/*
* Fill in the SampleRateConverter control block.
*/
-
spin_lock_irqsave(&state->card->lock, flags);
cs461x_poke(state->card, BA1_PSRC,
((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF));
@@ -770,7 +759,7 @@ static unsigned int cs_set_dac_rate(struct cs_state * state, unsigned int rate)
}
/* set recording sample rate */
-static unsigned int cs_set_adc_rate(struct cs_state * state, unsigned int rate)
+static unsigned int cs_set_adc_rate(struct cs_state *state, unsigned int rate)
{
struct dmabuf *dmabuf = &state->dmabuf;
struct cs_card *card = state->card;
@@ -815,7 +804,6 @@ static unsigned int cs_set_adc_rate(struct cs_state * state, unsigned int rate)
* dividend:remainder(ulOther / GOF_PER_SEC)
* initialDelay = dividend(((24 * Fs,in) + Fs,out - 1) / Fs,out)
*/
-
tmp1 = rate << 16;
coeffIncr = tmp1 / 48000;
tmp1 -= coeffIncr * 48000;
@@ -891,7 +879,7 @@ static void cs_play_setup(struct cs_state *state)
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_play_setup()+\n") );
cs461x_poke(card, BA1_PVOL, 0x80008000);
- if(!dmabuf->SGok)
+ if (!dmabuf->SGok)
cs461x_poke(card, BA1_PBA, virt_to_bus(dmabuf->pbuf));
Count = 4;
@@ -899,16 +887,14 @@ static void cs_play_setup(struct cs_state *state)
if ((dmabuf->fmt & CS_FMT_STEREO)) {
playFormat &= ~DMA_RQ_C2_AC_MONO_TO_STEREO;
Count *= 2;
- }
- else
+ } else
playFormat |= DMA_RQ_C2_AC_MONO_TO_STEREO;
if ((dmabuf->fmt & CS_FMT_16BIT)) {
playFormat &= ~(DMA_RQ_C2_AC_8_TO_16_BIT
| DMA_RQ_C2_AC_SIGNED_CONVERT);
Count *= 2;
- }
- else
+ } else
playFormat |= (DMA_RQ_C2_AC_8_TO_16_BIT
| DMA_RQ_C2_AC_SIGNED_CONVERT);
@@ -919,7 +905,6 @@ static void cs_play_setup(struct cs_state *state)
cs461x_poke(card, BA1_PDTC, tmp | --Count);
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_play_setup()-\n") );
-
}
static struct InitStruct
@@ -944,8 +929,7 @@ static void SetCaptureSPValues(struct cs_card *card)
{
unsigned i, offset;
CS_DBGOUT(CS_FUNCTION, 8, printk("cs46xx: SetCaptureSPValues()+\n") );
- for(i=0; i<sizeof(InitArray)/sizeof(struct InitStruct); i++)
- {
+ for (i = 0; i < sizeof(InitArray) / sizeof(struct InitStruct); i++) {
offset = InitArray[i].off*4; /* 8bit to 32bit offset value */
cs461x_poke(card, offset, InitArray[i].val );
}
@@ -957,8 +941,8 @@ static void cs_rec_setup(struct cs_state *state)
{
struct cs_card *card = state->card;
struct dmabuf *dmabuf = &state->dmabuf;
- CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_rec_setup()+\n") );
+ CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_rec_setup()+\n"));
SetCaptureSPValues(card);
/*
@@ -994,14 +978,11 @@ static inline unsigned cs_get_dma_addr(struct cs_state *state)
/*
* granularity is byte boundary, good part.
*/
- if(dmabuf->enable & DAC_RUNNING)
- {
+ if (dmabuf->enable & DAC_RUNNING)
offset = cs461x_peek(state->card, BA1_PBA);
- }
else /* ADC_RUNNING must be set */
- {
offset = cs461x_peek(state->card, BA1_CBA);
- }
+
CS_DBGOUT(CS_PARMS | CS_FUNCTION, 9,
printk("cs46xx: cs_get_dma_addr() %d\n",offset) );
offset = (u32)bus_to_virt((unsigned long)offset) - (u32)dmabuf->rawbuf;
@@ -1015,8 +996,7 @@ static void resync_dma_ptrs(struct cs_state *state)
struct dmabuf *dmabuf;
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: resync_dma_ptrs()+ \n") );
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
dmabuf->hwptr=dmabuf->swptr = 0;
dmabuf->pringbuf = 0;
@@ -1149,13 +1129,13 @@ static int alloc_dmabuf(struct cs_state *state)
/*
* check for order within limits, but do not overwrite value.
*/
- if((defaultorder > 1) && (defaultorder < 12))
+ if ((defaultorder > 1) && (defaultorder < 12))
df = defaultorder;
else
df = 2;
for (order = df; order >= DMABUF_MINORDER; order--)
- if ( (rawbuf = (void *) pci_alloc_consistent(
+ if ((rawbuf = (void *)pci_alloc_consistent(
card->pci_dev, PAGE_SIZE << order, &dmabuf->dmaaddr)))
break;
if (!rawbuf) {
@@ -1181,8 +1161,7 @@ static int alloc_dmabuf(struct cs_state *state)
/*
* only allocate the conversion buffer for the ADC
*/
- if(dmabuf->type == CS_TYPE_DAC)
- {
+ if (dmabuf->type == CS_TYPE_DAC) {
dmabuf->tmpbuff = NULL;
dmabuf->buforder_tmpbuff = 0;
return 0;
@@ -1258,8 +1237,7 @@ static int __prog_dmabuf(struct cs_state *state)
/*
* check for CAPTURE and use only non-sg for initial release
*/
- if(dmabuf->type == CS_TYPE_ADC)
- {
+ if (dmabuf->type == CS_TYPE_ADC) {
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf() ADC\n"));
/*
* add in non-sg support for capture.
@@ -1313,9 +1291,7 @@ static int __prog_dmabuf(struct cs_state *state)
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- 0 \n"));
return 0;
- }
- else if (dmabuf->type == CS_TYPE_DAC)
- {
+ } else if (dmabuf->type == CS_TYPE_DAC) {
/*
* Must be DAC
*/
@@ -1337,8 +1313,7 @@ static int __prog_dmabuf(struct cs_state *state)
allocated_pages = 1 << dmabuf->buforder;
allocated_bytes = allocated_pages*PAGE_SIZE;
- if(allocated_pages < 2)
- {
+ if (allocated_pages < 2) {
CS_DBGOUT(CS_FUNCTION, 4, printk(
"cs46xx: prog_dmabuf() Error: allocated_pages too small (%d)\n",
(unsigned)allocated_pages));
@@ -1353,14 +1328,14 @@ static int __prog_dmabuf(struct cs_state *state)
/* Set up S/G variables. */
*ptmp = virt_to_bus(dmabuf->rawbuf);
- *(ptmp+1) = 0x00000008;
- for(tmp1= 1; tmp1 < nSGpages; tmp1++) {
- *(ptmp+2*tmp1) = virt_to_bus( (dmabuf->rawbuf)+4096*tmp1);
- if( tmp1 == nSGpages-1)
+ *(ptmp + 1) = 0x00000008;
+ for (tmp1 = 1; tmp1 < nSGpages; tmp1++) {
+ *(ptmp + 2 * tmp1) = virt_to_bus((dmabuf->rawbuf) + 4096 * tmp1);
+ if (tmp1 == nSGpages - 1)
tmp2 = 0xbfff0000;
else
- tmp2 = 0x80000000+8*(tmp1+1);
- *(ptmp+2*tmp1+1) = tmp2;
+ tmp2 = 0x80000000 + 8 * (tmp1 + 1);
+ *(ptmp + 2 * tmp1 + 1) = tmp2;
}
SGarray[0] = 0x82c0200d;
SGarray[1] = 0xffff0000;
@@ -1368,18 +1343,17 @@ static int __prog_dmabuf(struct cs_state *state)
SGarray[3] = 0x00010600;
SGarray[4] = *(ptmp+2);
SGarray[5] = 0x80000010;
- SGarray[6] = *ptmp;
- SGarray[7] = *(ptmp+2);
- SGarray[8] = (virt_to_bus(dmabuf->pbuf) & 0xffff000) | 0x10;
-
- if (dmabuf->SGok) {
- dmabuf->numfrag = nSGpages;
- dmabuf->fragsize = 4096;
- dmabuf->fragsamples = 4096 >> sample_shift[dmabuf->fmt];
- dmabuf->fragshift = 12;
- dmabuf->dmasize = dmabuf->numfrag*4096;
- }
- else {
+ SGarray[6] = *ptmp;
+ SGarray[7] = *(ptmp+2);
+ SGarray[8] = (virt_to_bus(dmabuf->pbuf) & 0xffff000) | 0x10;
+
+ if (dmabuf->SGok) {
+ dmabuf->numfrag = nSGpages;
+ dmabuf->fragsize = 4096;
+ dmabuf->fragsamples = 4096 >> sample_shift[dmabuf->fmt];
+ dmabuf->fragshift = 12;
+ dmabuf->dmasize = dmabuf->numfrag * 4096;
+ } else {
SGarray[0] = 0xf2c0000f;
SGarray[1] = 0x00000200;
SGarray[2] = 0;
@@ -1391,8 +1365,8 @@ static int __prog_dmabuf(struct cs_state *state)
dmabuf->dmasize = 4096;
dmabuf->fragshift = 11;
}
- for(tmp1 = 0; tmp1 < sizeof(SGarray)/4; tmp1++)
- cs461x_poke( state->card, BA1_PDTC+tmp1*4, SGarray[tmp1]);
+ for (tmp1 = 0; tmp1 < sizeof(SGarray) / 4; tmp1++)
+ cs461x_poke(state->card, BA1_PDTC+tmp1 * 4, SGarray[tmp1]);
memset(dmabuf->rawbuf, (dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
dmabuf->dmasize);
@@ -1416,9 +1390,7 @@ static int __prog_dmabuf(struct cs_state *state)
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- \n"));
return 0;
- }
- else
- {
+ } else {
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: prog_dmabuf()- Invalid Type %d\n",
dmabuf->type));
}
@@ -1489,8 +1461,7 @@ static int drain_dac(struct cs_state *state, int nonblock)
}
remove_wait_queue(&dmabuf->wait, &wait);
current->state = TASK_RUNNING;
- if (signal_pending(current))
- {
+ if (signal_pending(current)) {
CS_DBGOUT(CS_FUNCTION, 4, printk("cs46xx: drain_dac()- -ERESTARTSYS\n"));
/*
* set to silence and let that clear the fifos.
@@ -1514,8 +1485,7 @@ static void cs_update_ptr(struct cs_card *card, int wake)
/* error handling and process wake up for ADC */
state = card->states[0];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->enable & ADC_RUNNING) {
/* update hardware pointer */
@@ -1531,12 +1501,10 @@ static void cs_update_ptr(struct cs_card *card, int wake)
if (dmabuf->count > dmabuf->dmasize)
dmabuf->count = dmabuf->dmasize;
- if(dmabuf->mapped)
- {
+ if (dmabuf->mapped) {
if (wake && dmabuf->count >= (signed)dmabuf->fragsize)
wake_up(&dmabuf->wait);
- } else
- {
+ } else {
if (wake && dmabuf->count > 0)
wake_up(&dmabuf->wait);
}
@@ -1547,8 +1515,7 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* Now the DAC
*/
state = card->states[1];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
/* error handling and process wake up for DAC */
if (dmabuf->enable & DAC_RUNNING) {
@@ -1570,7 +1537,7 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* in that, since dmasize is the buffer asked for
* via mmap.
*/
- if( dmabuf->count > dmabuf->dmasize)
+ if (dmabuf->count > dmabuf->dmasize)
dmabuf->count &= dmabuf->dmasize-1;
} else {
dmabuf->count -= diff;
@@ -1578,13 +1545,10 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* backfill with silence and clear out the last
* "diff" number of bytes.
*/
- if(hwptr >= diff)
- {
+ if (hwptr >= diff) {
memset(dmabuf->rawbuf + hwptr - diff,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80, diff);
- }
- else
- {
+ } else {
memset(dmabuf->rawbuf,
(dmabuf->fmt & CS_FMT_16BIT) ? 0 : 0x80,
(unsigned)hwptr);
@@ -1602,12 +1566,12 @@ static void cs_update_ptr(struct cs_card *card, int wake)
* buffer underrun or buffer overrun, reset the
* count of bytes written back to 0.
*/
- if(dmabuf->count < 0)
- dmabuf->underrun=1;
+ if (dmabuf->count < 0)
+ dmabuf->underrun = 1;
dmabuf->count = 0;
dmabuf->error++;
}
- if (wake && dmabuf->count < (signed)dmabuf->dmasize/2)
+ if (wake && dmabuf->count < (signed)dmabuf->dmasize / 2)
wake_up(&dmabuf->wait);
}
}
@@ -1661,8 +1625,7 @@ static irqreturn_t cs_interrupt(int irq, void *dev_id, struct pt_regs *regs)
status = cs461x_peekBA0(card, BA0_HISR);
- if ((status & 0x7fffffff) == 0)
- {
+ if ((status & 0x7fffffff) == 0) {
cs461x_pokeBA0(card, BA0_HICR, HICR_CHGM|HICR_IEV);
spin_unlock(&card->lock);
return IRQ_HANDLED; /* Might be IRQ_NONE.. */
@@ -1671,15 +1634,14 @@ static irqreturn_t cs_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/*
* check for playback or capture interrupt only
*/
- if( ((status & HISR_VC0) && playstate && playstate->dmabuf.ready) ||
- (((status & HISR_VC1) && recstate && recstate->dmabuf.ready)) )
- {
+ if (((status & HISR_VC0) && playstate && playstate->dmabuf.ready) ||
+ (((status & HISR_VC1) && recstate && recstate->dmabuf.ready))) {
CS_DBGOUT(CS_INTERRUPT, 8, printk(
"cs46xx: cs_interrupt() interrupt bit(s) set (0x%x)\n",status));
cs_update_ptr(card, CS_TRUE);
}
- if( status & HISR_MIDI )
+ if (status & HISR_MIDI)
cs_handle_midi(card);
/* clear 'em */
@@ -1694,7 +1656,7 @@ static irqreturn_t cs_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static ssize_t cs_midi_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
ssize_t ret;
unsigned long flags;
unsigned ptr;
@@ -1737,7 +1699,7 @@ static ssize_t cs_midi_read(struct file *file, char __user *buffer, size_t count
static ssize_t cs_midi_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
ssize_t ret;
unsigned long flags;
unsigned ptr;
@@ -1785,7 +1747,7 @@ static ssize_t cs_midi_write(struct file *file, const char __user *buffer, size_
static unsigned int cs_midi_poll(struct file *file, struct poll_table_struct *wait)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
unsigned long flags;
unsigned int mask = 0;
@@ -1810,12 +1772,11 @@ static unsigned int cs_midi_poll(struct file *file, struct poll_table_struct *wa
static int cs_midi_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
- struct cs_card *card=NULL;
+ struct cs_card *card = NULL;
unsigned long flags;
struct list_head *entry;
- list_for_each(entry, &cs46xx_devs)
- {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
if (card->dev_midi == minor)
break;
@@ -1823,8 +1784,7 @@ static int cs_midi_open(struct inode *inode, struct file *file)
if (entry == &cs46xx_devs)
return -ENODEV;
- if (!card)
- {
+ if (!card) {
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
"cs46xx: cs46xx_midi_open(): Error - unable to find card struct\n"));
return -ENODEV;
@@ -1852,12 +1812,10 @@ static int cs_midi_open(struct inode *inode, struct file *file)
cs461x_pokeBA0(card, BA0_MIDCR, 0x0000000f); /* Enable xmit, rcv. */
cs461x_pokeBA0(card, BA0_HICR, HICR_IEV | HICR_CHGM); /* Enable interrupts */
}
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ)
card->midi.ird = card->midi.iwr = card->midi.icnt = 0;
- }
- if (file->f_mode & FMODE_WRITE) {
+ if (file->f_mode & FMODE_WRITE)
card->midi.ord = card->midi.owr = card->midi.ocnt = 0;
- }
spin_unlock_irqrestore(&card->midi.lock, flags);
card->midi.open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE));
mutex_unlock(&card->midi.open_mutex);
@@ -1867,7 +1825,7 @@ static int cs_midi_open(struct inode *inode, struct file *file)
static int cs_midi_release(struct inode *inode, struct file *file)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
unsigned count, tmo;
@@ -1933,11 +1891,10 @@ static /*const*/ struct file_operations cs_midi_fops = {
static void CopySamples(char *dst, char *src, int count, unsigned fmt,
struct dmabuf *dmabuf)
{
-
s32 s32AudioSample;
- s16 *psSrc=(s16 *)src;
- s16 *psDst=(s16 *)dst;
- u8 *pucDst=(u8 *)dst;
+ s16 *psSrc = (s16 *)src;
+ s16 *psDst = (s16 *)dst;
+ u8 *pucDst = (u8 *)dst;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs46xx: CopySamples()+ ") );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
@@ -1947,34 +1904,29 @@ static void CopySamples(char *dst, char *src, int count, unsigned fmt,
/*
* See if the data should be output as 8-bit unsigned stereo.
*/
- if((fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT))
- {
+ if ((fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT)) {
/*
* Convert each 16-bit signed stereo sample to 8-bit unsigned
* stereo using rounding.
*/
psSrc = (s16 *)src;
- count = count/2;
- while(count--)
- {
+ count = count / 2;
+ while (count--)
*(pucDst++) = (u8)(((s16)(*psSrc++) + (s16)0x8000) >> 8);
- }
}
/*
* See if the data should be output at 8-bit unsigned mono.
*/
- else if(!(fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT))
- {
+ else if (!(fmt & CS_FMT_STEREO) && !(fmt & CS_FMT_16BIT)) {
/*
* Convert each 16-bit signed stereo sample to 8-bit unsigned
* mono using averaging and rounding.
*/
psSrc = (s16 *)src;
- count = count/2;
- while(count--)
- {
- s32AudioSample = ((*psSrc)+(*(psSrc + 1)))/2 + (s32)0x80;
- if(s32AudioSample > 0x7fff)
+ count = count / 2;
+ while (count--) {
+ s32AudioSample = ((*psSrc) + (*(psSrc + 1))) / 2 + (s32)0x80;
+ if (s32AudioSample > 0x7fff)
s32AudioSample = 0x7fff;
*(pucDst++) = (u8)(((s16)s32AudioSample + (s16)0x8000) >> 8);
psSrc += 2;
@@ -1983,17 +1935,15 @@ static void CopySamples(char *dst, char *src, int count, unsigned fmt,
/*
* See if the data should be output at 16-bit signed mono.
*/
- else if(!(fmt & CS_FMT_STEREO) && (fmt & CS_FMT_16BIT))
- {
+ else if (!(fmt & CS_FMT_STEREO) && (fmt & CS_FMT_16BIT)) {
/*
* Convert each 16-bit signed stereo sample to 16-bit signed
* mono using averaging.
*/
psSrc = (s16 *)src;
- count = count/2;
- while(count--)
- {
- *(psDst++) = (s16)((*psSrc)+(*(psSrc + 1)))/2;
+ count = count / 2;
+ while (count--) {
+ *(psDst++) = (s16)((*psSrc) + (*(psSrc + 1))) / 2;
psSrc += 2;
}
}
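
For the stereo 16-bit to 8-bit unsigned conversion above, the per-sample expression (u8)(((s16)s + (s16)0x8000) >> 8) maps the full signed range onto 0..255; checking the endpoints, with (s16)0x8000 == -32768 and assuming the arithmetic right shift gcc provides on these targets:

	/*   s = -32768:  (-65536 >> 8) = -256  ->  (u8)0x00 =   0   */
	/*   s =      0:  (-32768 >> 8) = -128  ->  (u8)0x80 = 128   */
	/*   s =  32767:  (    -1 >> 8) =   -1  ->  (u8)0xff = 255   */
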
@@ -2020,20 +1970,15 @@ static unsigned cs_copy_to_user(
"cs_copy_to_user()+ fmt=0x%x cnt=%d dest=%p\n",
dmabuf->fmt,(unsigned)cnt,dest) );
- if(cnt > dmabuf->dmasize)
- {
+ if (cnt > dmabuf->dmasize)
cnt = dmabuf->dmasize;
- }
- if(!cnt)
- {
+ if (!cnt) {
*copied = 0;
return 0;
}
- if(dmabuf->divisor != 1)
- {
- if(!dmabuf->tmpbuff)
- {
- *copied = cnt/dmabuf->divisor;
+ if (dmabuf->divisor != 1) {
+ if (!dmabuf->tmpbuff) {
+ *copied = cnt / dmabuf->divisor;
return 0;
}
@@ -2042,17 +1987,16 @@ static unsigned cs_copy_to_user(
src = dmabuf->tmpbuff;
cnt = cnt/dmabuf->divisor;
}
- if (copy_to_user(dest, src, cnt))
- {
+ if (copy_to_user(dest, src, cnt)) {
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_ERR
"cs46xx: cs_copy_to_user()- fault dest=%p src=%p cnt=%d\n",
- dest,src,cnt) );
+ dest,src,cnt));
*copied = 0;
return -EFAULT;
}
*copied = cnt;
CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO
- "cs46xx: cs_copy_to_user()- copied bytes is %d \n",cnt) );
+ "cs46xx: cs_copy_to_user()- copied bytes is %d \n",cnt));
return 0;
}
@@ -2060,7 +2004,7 @@ static unsigned cs_copy_to_user(
the user's buffer. it is filled by the dma machine and drained by this loop. */
static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *) file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
DECLARE_WAITQUEUE(wait, current);
struct dmabuf *dmabuf;
@@ -2068,12 +2012,12 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
unsigned long flags;
unsigned swptr;
int cnt;
- unsigned copied=0;
+ unsigned copied = 0;
CS_DBGOUT(CS_WAVE_READ | CS_FUNCTION, 4,
printk("cs46xx: cs_read()+ %zd\n",count) );
- state = (struct cs_state *)card->states[0];
- if(!state)
+ state = card->states[0];
+ if (!state)
return -ENODEV;
dmabuf = &state->dmabuf;
@@ -2088,11 +2032,11 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
add_wait_queue(&state->dmabuf.wait, &wait);
while (count > 0) {
- while(!(card->pm.flags & CS46XX_PM_IDLE))
- {
+ while (!(card->pm.flags & CS46XX_PM_IDLE)) {
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
}
@@ -2112,19 +2056,20 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
recorded */
start_adc(state);
if (file->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
+ if (!ret)
+ ret = -EAGAIN;
goto out;
}
mutex_unlock(&state->sem);
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
mutex_lock(&state->sem);
- if (dmabuf->mapped)
- {
- if(!ret)
+ if (dmabuf->mapped) {
+ if (!ret)
ret = -ENXIO;
goto out;
}
@@ -2135,12 +2080,12 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof
"_read() copy_to cnt=%d count=%zd ", cnt,count) );
CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
" .dmasize=%d .count=%d buffer=%p ret=%zd\n",
- dmabuf->dmasize,dmabuf->count,buffer,ret) );
+ dmabuf->dmasize,dmabuf->count,buffer,ret));
if (cs_copy_to_user(state, buffer,
- (char *)dmabuf->rawbuf + swptr, cnt, &copied))
- {
- if (!ret) ret = -EFAULT;
+ (char *)dmabuf->rawbuf + swptr, cnt, &copied)) {
+ if (!ret)
+ ret = -EFAULT;
goto out;
}
swptr = (swptr + cnt) % dmabuf->dmasize;
@@ -2167,7 +2112,7 @@ out2:
the soundcard. it is drained by the dma machine and filled by this loop. */
static ssize_t cs_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct cs_card *card = (struct cs_card *) file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
DECLARE_WAITQUEUE(wait, current);
struct dmabuf *dmabuf;
@@ -2178,16 +2123,15 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
CS_DBGOUT(CS_WAVE_WRITE | CS_FUNCTION, 4,
printk("cs46xx: cs_write called, count = %zd\n", count) );
- state = (struct cs_state *)card->states[1];
- if(!state)
+ state = card->states[1];
+ if (!state)
return -ENODEV;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
dmabuf = &state->dmabuf;
mutex_lock(&state->sem);
- if (dmabuf->mapped)
- {
+ if (dmabuf->mapped) {
ret = -ENXIO;
goto out;
}
@@ -2201,11 +2145,11 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
* check for PM events and underrun/overrun in the loop.
*/
while (count > 0) {
- while(!(card->pm.flags & CS46XX_PM_IDLE))
- {
+ while (!(card->pm.flags & CS46XX_PM_IDLE)) {
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
}
@@ -2216,8 +2160,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
dmabuf->count = 0;
dmabuf->swptr = dmabuf->hwptr;
}
- if (dmabuf->underrun)
- {
+ if (dmabuf->underrun) {
dmabuf->underrun = 0;
dmabuf->hwptr = cs_get_dma_addr(state);
dmabuf->swptr = dmabuf->hwptr;
@@ -2238,34 +2181,35 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou
played */
start_dac(state);
if (file->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
+ if (!ret)
+ ret = -EAGAIN;
goto out;
}
mutex_unlock(&state->sem);
schedule();
if (signal_pending(current)) {
- if(!ret) ret = -ERESTARTSYS;
+ if (!ret)
+ ret = -ERESTARTSYS;
goto out;
}
mutex_lock(&state->sem);
- if (dmabuf->mapped)
- {
- if(!ret)
+ if (dmabuf->mapped) {
+ if (!ret)
ret = -ENXIO;
goto out;
}
continue;
}
if (copy_from_user(dmabuf->rawbuf + swptr, buffer, cnt)) {
- if (!ret) ret = -EFAULT;
+ if (!ret)
+ ret = -EFAULT;
goto out;
}
spin_lock_irqsave(&state->card->lock, flags);
swptr = (swptr + cnt) % dmabuf->dmasize;
dmabuf->swptr = swptr;
dmabuf->count += cnt;
- if(dmabuf->count > dmabuf->dmasize)
- {
+ if (dmabuf->count > dmabuf->dmasize) {
CS_DBGOUT(CS_WAVE_WRITE | CS_ERROR, 2, printk(
"cs46xx: cs_write() d->count > dmasize - resetting\n"));
dmabuf->count = dmabuf->dmasize;
@@ -2284,38 +2228,32 @@ out:
set_current_state(TASK_RUNNING);
CS_DBGOUT(CS_WAVE_WRITE | CS_FUNCTION, 2,
- printk("cs46xx: cs_write()- ret=%zd\n", ret) );
+ printk("cs46xx: cs_write()- ret=%zd\n", ret));
return ret;
}
static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct dmabuf *dmabuf;
struct cs_state *state;
-
unsigned long flags;
unsigned int mask = 0;
CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_poll()+ \n"));
- if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
- {
+ if (!(file->f_mode & (FMODE_WRITE | FMODE_READ))) {
return -EINVAL;
}
- if (file->f_mode & FMODE_WRITE)
- {
+ if (file->f_mode & FMODE_WRITE) {
state = card->states[1];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
poll_wait(file, &dmabuf->wait, wait);
}
}
- if (file->f_mode & FMODE_READ)
- {
+ if (file->f_mode & FMODE_READ) {
state = card->states[0];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
poll_wait(file, &dmabuf->wait, wait);
}
@@ -2325,8 +2263,7 @@ static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
cs_update_ptr(card, CS_FALSE);
if (file->f_mode & FMODE_READ) {
state = card->states[0];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->count >= (signed)dmabuf->fragsize)
mask |= POLLIN | POLLRDNORM;
@@ -2334,8 +2271,7 @@ static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
}
if (file->f_mode & FMODE_WRITE) {
state = card->states[1];
- if(state)
- {
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->mapped) {
if (dmabuf->count >= (signed)dmabuf->fragsize)
@@ -2364,7 +2300,7 @@ static unsigned int cs_poll(struct file *file, struct poll_table_struct *wait)
static int cs_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
struct dmabuf *dmabuf;
int ret = 0;
@@ -2376,8 +2312,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_flags & VM_WRITE) {
state = card->states[1];
- if(state)
- {
+ if (state) {
CS_DBGOUT(CS_OPEN, 2, printk(
"cs46xx: cs_mmap() VM_WRITE - state TRUE prog_dmabuf DAC\n") );
if ((ret = prog_dmabuf(state)) != 0)
@@ -2385,8 +2320,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
}
} else if (vma->vm_flags & VM_READ) {
state = card->states[0];
- if(state)
- {
+ if (state) {
CS_DBGOUT(CS_OPEN, 2, printk(
"cs46xx: cs_mmap() VM_READ - state TRUE prog_dmabuf ADC\n") );
if ((ret = prog_dmabuf(state)) != 0)
@@ -2414,8 +2348,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
mutex_lock(&state->sem);
dmabuf = &state->dmabuf;
- if (cs4x_pgoff(vma) != 0)
- {
+ if (cs4x_pgoff(vma) != 0) {
ret = -EINVAL;
goto out;
}
@@ -2423,15 +2356,13 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma)
CS_DBGOUT(CS_PARMS, 2, printk("cs46xx: cs_mmap(): size=%d\n",(unsigned)size) );
- if (size > (PAGE_SIZE << dmabuf->buforder))
- {
+ if (size > (PAGE_SIZE << dmabuf->buforder)) {
ret = -EINVAL;
goto out;
}
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(dmabuf->rawbuf) >> PAGE_SHIFT,
- size, vma->vm_page_prot))
- {
+ size, vma->vm_page_prot)) {
ret = -EAGAIN;
goto out;
}
@@ -2445,25 +2376,24 @@ out:
static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state;
- struct dmabuf *dmabuf=NULL;
+ struct dmabuf *dmabuf = NULL;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
- int val, valsave, mapped, ret;
+ int val, valsave, ret;
+ int mapped = 0;
void __user *argp = (void __user *)arg;
int __user *p = argp;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
mapped = (file->f_mode & FMODE_READ) && dmabuf->mapped;
}
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
mapped |= (file->f_mode & FMODE_WRITE) && dmabuf->mapped;
}
@@ -2472,17 +2402,14 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
printioctl(cmd);
#endif
- switch (cmd)
- {
+ switch (cmd) {
case OSS_GETVERSION:
return put_user(SOUND_VERSION, p);
-
case SNDCTL_DSP_RESET:
/* FIXME: spin_lock ? */
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
synchronize_irq(card->irq);
@@ -2495,9 +2422,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
synchronize_irq(card->irq);
@@ -2511,20 +2437,17 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
CS_DBGOUT(CS_IOCTL, 2, printk("cs46xx: DSP_RESET()-\n") );
return 0;
-
case SNDCTL_DSP_SYNC:
if (file->f_mode & FMODE_WRITE)
return drain_dac(state, file->f_flags & O_NONBLOCK);
return 0;
-
case SNDCTL_DSP_SPEED: /* set sample rate */
if (get_user(val, p))
return -EFAULT;
if (val >= 0) {
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
@@ -2534,9 +2457,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
@@ -2553,19 +2475,17 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return put_user(dmabuf->rate, p);
}
return put_user(0, p);
-
case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
if (get_user(val, p))
return -EFAULT;
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val)
+ if (val)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2577,14 +2497,13 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val)
+ if (val)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2596,12 +2515,10 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return 0;
-
case SNDCTL_DSP_GETBLKSIZE:
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
if ((val = prog_dmabuf(state)))
return val;
@@ -2609,9 +2526,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
if ((val = prog_dmabuf(state)))
return val;
@@ -2620,10 +2536,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return put_user(0, p);
-
case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format*/
return put_user(AFMT_S16_LE | AFMT_U8, p);
-
case SNDCTL_DSP_SETFMT: /* Select sample format */
if (get_user(val, p))
return -EFAULT;
@@ -2635,88 +2549,75 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
val == AFMT_U8 ? "8Bit Unsigned" : "") );
valsave = val;
if (val != AFMT_QUERY) {
- if(val==AFMT_S16_LE || val==AFMT_U8)
- {
+ if (val == AFMT_S16_LE || val == AFMT_U8) {
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val==AFMT_S16_LE)
+ if (val == AFMT_S16_LE)
dmabuf->fmt |= CS_FMT_16BIT;
else
dmabuf->fmt &= ~CS_FMT_16BIT;
cs_set_divisor(dmabuf);
- if((ret = prog_dmabuf(state)))
+ if ((ret = prog_dmabuf(state)))
return ret;
}
}
if (file->f_mode & FMODE_READ) {
val = valsave;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val==AFMT_S16_LE)
+ if (val == AFMT_S16_LE)
dmabuf->fmt |= CS_FMT_16BIT;
else
dmabuf->fmt &= ~CS_FMT_16BIT;
cs_set_divisor(dmabuf);
- if((ret = prog_dmabuf(state)))
+ if ((ret = prog_dmabuf(state)))
return ret;
}
}
- }
- else
- {
+ } else {
CS_DBGOUT(CS_IOCTL | CS_ERROR, 2, printk(
"cs46xx: DSP_SETFMT() Unsupported format (0x%x)\n",
valsave) );
}
- }
- else
- {
- if(file->f_mode & FMODE_WRITE)
- {
- state = (struct cs_state *)card->states[1];
- if(state)
+ } else {
+ if (file->f_mode & FMODE_WRITE) {
+ state = card->states[1];
+ if (state)
dmabuf = &state->dmabuf;
- }
- else if(file->f_mode & FMODE_READ)
- {
- state = (struct cs_state *)card->states[0];
- if(state)
+ } else if (file->f_mode & FMODE_READ) {
+ state = card->states[0];
+ if (state)
dmabuf = &state->dmabuf;
}
}
- if(dmabuf)
- {
- if(dmabuf->fmt & CS_FMT_16BIT)
+ if (dmabuf) {
+ if (dmabuf->fmt & CS_FMT_16BIT)
return put_user(AFMT_S16_LE, p);
else
return put_user(AFMT_U8, p);
}
return put_user(0, p);
-
case SNDCTL_DSP_CHANNELS:
if (get_user(val, p))
return -EFAULT;
if (val != 0) {
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
stop_dac(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val>1)
+ if (val > 1)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2726,14 +2627,13 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
stop_adc(state);
dmabuf->ready = 0;
dmabuf->SGok = 0;
- if(val>1)
+ if (val > 1)
dmabuf->fmt |= CS_FMT_STEREO;
else
dmabuf->fmt &= ~CS_FMT_STEREO;
@@ -2745,19 +2645,16 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
return put_user((dmabuf->fmt & CS_FMT_STEREO) ? 2 : 1,
p);
-
case SNDCTL_DSP_POST:
/*
* There will be a longer than normal pause in the data.
* so... do nothing, because there is nothing that we can do.
*/
return 0;
-
case SNDCTL_DSP_SUBDIVIDE:
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->subdivision)
return -EINVAL;
@@ -2769,9 +2666,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
if (dmabuf->subdivision)
return -EINVAL;
@@ -2783,37 +2679,31 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return 0;
-
case SNDCTL_DSP_SETFRAGMENT:
if (get_user(val, p))
return -EFAULT;
-
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
dmabuf->ossfragshift = val & 0xffff;
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
}
}
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
dmabuf->ossfragshift = val & 0xffff;
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
}
}
return 0;
-
case SNDCTL_DSP_GETOSPACE:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
@@ -2832,13 +2722,11 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
}
return -ENODEV;
-
case SNDCTL_DSP_GETISPACE:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
@@ -2850,48 +2738,39 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
}
return -ENODEV;
-
case SNDCTL_DSP_NONBLOCK:
file->f_flags |= O_NONBLOCK;
return 0;
-
case SNDCTL_DSP_GETCAPS:
return put_user(DSP_CAP_REALTIME|DSP_CAP_TRIGGER|DSP_CAP_MMAP,
p);
-
case SNDCTL_DSP_GETTRIGGER:
val = 0;
CS_DBGOUT(CS_IOCTL, 2, printk("cs46xx: DSP_GETTRIGGER()+\n") );
- if (file->f_mode & FMODE_WRITE)
- {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ if (file->f_mode & FMODE_WRITE) {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
- if(dmabuf->enable & DAC_RUNNING)
+ if (dmabuf->enable & DAC_RUNNING)
val |= PCM_ENABLE_INPUT;
}
}
- if (file->f_mode & FMODE_READ)
- {
- if(state)
- {
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ) {
+ if (state) {
+ state = card->states[0];
dmabuf = &state->dmabuf;
- if(dmabuf->enable & ADC_RUNNING)
+ if (dmabuf->enable & ADC_RUNNING)
val |= PCM_ENABLE_OUTPUT;
}
}
CS_DBGOUT(CS_IOCTL, 2, printk("cs46xx: DSP_GETTRIGGER()- val=0x%x\n",val) );
return put_user(val, p);
-
case SNDCTL_DSP_SETTRIGGER:
if (get_user(val, p))
return -EFAULT;
if (file->f_mode & FMODE_READ) {
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
if (val & PCM_ENABLE_INPUT) {
if (!dmabuf->ready && (ret = prog_dmabuf(state)))
@@ -2902,9 +2781,8 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
if (file->f_mode & FMODE_WRITE) {
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
if (val & PCM_ENABLE_OUTPUT) {
if (!dmabuf->ready && (ret = prog_dmabuf(state)))
@@ -2915,13 +2793,11 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
}
}
return 0;
-
case SNDCTL_DSP_GETIPTR:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
- state = (struct cs_state *)card->states[0];
- if(state)
- {
+ state = card->states[0];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
@@ -2934,28 +2810,23 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return 0;
}
return -ENODEV;
-
case SNDCTL_DSP_GETOPTR:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
cinfo.bytes = dmabuf->total_bytes;
- if (dmabuf->mapped)
- {
+ if (dmabuf->mapped) {
cinfo.blocks = (cinfo.bytes >> dmabuf->fragshift)
- dmabuf->blocks;
CS_DBGOUT(CS_PARMS, 8,
printk("total_bytes=%d blocks=%d dmabuf->blocks=%d\n",
cinfo.bytes,cinfo.blocks,dmabuf->blocks) );
dmabuf->blocks = cinfo.bytes >> dmabuf->fragshift;
- }
- else
- {
+ } else {
cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
}
cinfo.ptr = dmabuf->hwptr;
@@ -2969,66 +2840,54 @@ static int cs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, un
return 0;
}
return -ENODEV;
-
case SNDCTL_DSP_SETDUPLEX:
return 0;
-
case SNDCTL_DSP_GETODELAY:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
spin_lock_irqsave(&state->card->lock, flags);
cs_update_ptr(card, CS_TRUE);
val = dmabuf->count;
spin_unlock_irqrestore(&state->card->lock, flags);
- }
- else
+ } else
val = 0;
return put_user(val, p);
-
case SOUND_PCM_READ_RATE:
- if(file->f_mode & FMODE_READ)
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ)
+ state = card->states[0];
else
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
return put_user(dmabuf->rate, p);
}
return put_user(0, p);
-
-
case SOUND_PCM_READ_CHANNELS:
- if(file->f_mode & FMODE_READ)
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ)
+ state = card->states[0];
else
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
return put_user((dmabuf->fmt & CS_FMT_STEREO) ? 2 : 1,
p);
}
return put_user(0, p);
-
case SOUND_PCM_READ_BITS:
- if(file->f_mode & FMODE_READ)
- state = (struct cs_state *)card->states[0];
+ if (file->f_mode & FMODE_READ)
+ state = card->states[0];
else
- state = (struct cs_state *)card->states[1];
- if(state)
- {
+ state = card->states[1];
+ if (state) {
dmabuf = &state->dmabuf;
return put_user((dmabuf->fmt & CS_FMT_16BIT) ?
AFMT_S16_LE : AFMT_U8, p);
}
return put_user(0, p);
-
case SNDCTL_DSP_MAPINBUF:
case SNDCTL_DSP_MAPOUTBUF:
case SNDCTL_DSP_SETSYNCRO:
@@ -3057,18 +2916,15 @@ static void amp_voyetra(struct cs_card *card, int change)
/* Manage the EAPD bit on the Crystal 4297
and the Analog AD1885 */
- int old=card->amplifier;
+ int old = card->amplifier;
card->amplifier+=change;
- if(card->amplifier && !old)
- {
+ if (card->amplifier && !old) {
/* Turn the EAPD amp on */
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL,
cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) |
0x8000);
- }
- else if(old && !card->amplifier)
- {
+ } else if (old && !card->amplifier) {
/* Turn the EAPD amp off */
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL,
cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
@@ -3083,25 +2939,21 @@ static void amp_voyetra(struct cs_card *card, int change)
static void amp_hercules(struct cs_card *card, int change)
{
- int old=card->amplifier;
- if(!card)
- {
+ int old = card->amplifier;
+ if (!card) {
CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
"cs46xx: amp_hercules() called before initialized.\n"));
return;
}
card->amplifier+=change;
- if( (card->amplifier && !old) && !(hercules_egpio_disable))
- {
+ if ((card->amplifier && !old) && !(hercules_egpio_disable)) {
CS_DBGOUT(CS_PARMS, 4, printk(KERN_INFO
"cs46xx: amp_hercules() external amp enabled\n"));
cs461x_pokeBA0(card, BA0_EGPIODR,
EGPIODR_GPOE2); /* enable EGPIO2 output */
cs461x_pokeBA0(card, BA0_EGPIOPTR,
EGPIOPTR_GPPT2); /* open-drain on output */
- }
- else if(old && !card->amplifier)
- {
+ } else if (old && !card->amplifier) {
CS_DBGOUT(CS_PARMS, 4, printk(KERN_INFO
"cs46xx: amp_hercules() external amp disabled\n"));
cs461x_pokeBA0(card, BA0_EGPIODR, 0); /* disable */
@@ -3124,31 +2976,28 @@ static void clkrun_hack(struct cs_card *card, int change)
u16 control;
u8 pp;
unsigned long port;
- int old=card->active;
+ int old = card->active;
card->active+=change;
acpi_dev = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
- if(acpi_dev == NULL)
+ if (acpi_dev == NULL)
return; /* Not a thinkpad thats for sure */
/* Find the control port */
pci_read_config_byte(acpi_dev, 0x41, &pp);
- port=pp<<8;
+ port = pp << 8;
/* Read ACPI port */
- control=inw(port+0x10);
+ control = inw(port + 0x10);
/* Flip CLKRUN off while running */
- if(!card->active && old)
- {
+ if (!card->active && old) {
CS_DBGOUT(CS_PARMS , 9, printk( KERN_INFO
"cs46xx: clkrun() enable clkrun - change=%d active=%d\n",
change,card->active));
outw(control|0x2000, port+0x10);
- }
- else
- {
+ } else {
/*
* sometimes on a resume the bit is set, so always reset the bit.
*/
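/*
 * [Editorial sketch - not part of this patch] amp_voyetra(), amp_hercules()
 * and clkrun_hack() above all use the same edge-triggered refcount idiom:
 * remember the old count, apply the delta, and only touch the hardware on
 * the 0 -> nonzero and nonzero -> 0 transitions. A minimal standalone
 * illustration of that idiom; every name below is made up for the example.
 */
static int use_count;

static void power_up(void)   { /* first user arrived: enable amp/clock */ }
static void power_down(void) { /* last user gone: disable it again */ }

static void resource_ctrl(int change)
{
	int old = use_count;

	use_count += change;
	if (use_count && !old)
		power_up();
	else if (old && !use_count)
		power_down();
}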
@@ -3162,20 +3011,19 @@ static void clkrun_hack(struct cs_card *card, int change)
static int cs_open(struct inode *inode, struct file *file)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct cs_state *state = NULL;
struct dmabuf *dmabuf = NULL;
struct list_head *entry;
unsigned int minor = iminor(inode);
- int ret=0;
+ int ret = 0;
unsigned int tmp;
CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()+ file=%p %s %s\n",
file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
- list_for_each(entry, &cs46xx_devs)
- {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
if (!((card->dev_audio ^ minor) & ~0xf))
@@ -3192,11 +3040,10 @@ static int cs_open(struct inode *inode, struct file *file)
/*
* hardcode state[0] for capture, [1] for playback
*/
- if(file->f_mode & FMODE_READ)
- {
+ if (file->f_mode & FMODE_READ) {
CS_DBGOUT(CS_WAVE_READ, 2, printk("cs46xx: cs_open() FMODE_READ\n") );
if (card->states[0] == NULL) {
- state = card->states[0] = (struct cs_state *)
+ state = card->states[0] =
kmalloc(sizeof(struct cs_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -3204,36 +3051,32 @@ static int cs_open(struct inode *inode, struct file *file)
mutex_init(&state->sem);
dmabuf = &state->dmabuf;
dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if(dmabuf->pbuf==NULL)
- {
+ if (dmabuf->pbuf == NULL) {
kfree(state);
- card->states[0]=NULL;
+ card->states[0] = NULL;
return -ENOMEM;
}
- }
- else
- {
+ } else {
state = card->states[0];
- if(state->open_mode & FMODE_READ)
+ if (state->open_mode & FMODE_READ)
return -EBUSY;
}
dmabuf->channel = card->alloc_rec_pcm_channel(card);
if (dmabuf->channel == NULL) {
- kfree (card->states[0]);
+ kfree(card->states[0]);
card->states[0] = NULL;
return -ENODEV;
}
/* Now turn on external AMP if needed */
state->card = card;
- state->card->active_ctrl(state->card,1);
- state->card->amplifier_ctrl(state->card,1);
+ state->card->active_ctrl(state->card, 1);
+ state->card->amplifier_ctrl(state->card, 1);
- if( (tmp = cs46xx_powerup(card, CS_POWER_ADC)) )
- {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_ADC))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs46xx_powerup of ADC failed (0x%x)\n",tmp) );
+ "cs46xx: cs46xx_powerup of ADC failed (0x%x)\n", tmp));
return -EIO;
}
@@ -3263,11 +3106,10 @@ static int cs_open(struct inode *inode, struct file *file)
state->open_mode |= FMODE_READ;
mutex_unlock(&state->open_mutex);
}
- if(file->f_mode & FMODE_WRITE)
- {
+ if (file->f_mode & FMODE_WRITE) {
CS_DBGOUT(CS_OPEN, 2, printk("cs46xx: cs_open() FMODE_WRITE\n") );
if (card->states[1] == NULL) {
- state = card->states[1] = (struct cs_state *)
+ state = card->states[1] =
kmalloc(sizeof(struct cs_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -3275,36 +3117,32 @@ static int cs_open(struct inode *inode, struct file *file)
mutex_init(&state->sem);
dmabuf = &state->dmabuf;
dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if(dmabuf->pbuf==NULL)
- {
+ if (dmabuf->pbuf == NULL) {
kfree(state);
- card->states[1]=NULL;
+ card->states[1] = NULL;
return -ENOMEM;
}
- }
- else
- {
+ } else {
state = card->states[1];
- if(state->open_mode & FMODE_WRITE)
+ if (state->open_mode & FMODE_WRITE)
return -EBUSY;
}
dmabuf->channel = card->alloc_pcm_channel(card);
if (dmabuf->channel == NULL) {
- kfree (card->states[1]);
+ kfree(card->states[1]);
card->states[1] = NULL;
return -ENODEV;
}
/* Now turn on external AMP if needed */
state->card = card;
- state->card->active_ctrl(state->card,1);
- state->card->amplifier_ctrl(state->card,1);
+ state->card->active_ctrl(state->card, 1);
+ state->card->amplifier_ctrl(state->card, 1);
- if( (tmp = cs46xx_powerup(card, CS_POWER_DAC)) )
- {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_DAC))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs46xx_powerup of DAC failed (0x%x)\n",tmp) );
+ "cs46xx: cs46xx_powerup of DAC failed (0x%x)\n", tmp));
return -EIO;
}
@@ -3333,33 +3171,29 @@ static int cs_open(struct inode *inode, struct file *file)
state->open_mode |= FMODE_WRITE;
mutex_unlock(&state->open_mutex);
- if((ret = prog_dmabuf(state)))
+ if ((ret = prog_dmabuf(state)))
return ret;
}
- CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n") );
+ CS_DBGOUT(CS_OPEN | CS_FUNCTION, 2, printk("cs46xx: cs_open()- 0\n"));
return nonseekable_open(inode, file);
}
static int cs_release(struct inode *inode, struct file *file)
{
- struct cs_card *card = (struct cs_card *)file->private_data;
+ struct cs_card *card = file->private_data;
struct dmabuf *dmabuf;
struct cs_state *state;
unsigned int tmp;
CS_DBGOUT(CS_RELEASE | CS_FUNCTION, 2, printk("cs46xx: cs_release()+ file=%p %s %s\n",
file, file->f_mode & FMODE_WRITE ? "FMODE_WRITE" : "",
- file->f_mode & FMODE_READ ? "FMODE_READ" : "") );
+ file->f_mode & FMODE_READ ? "FMODE_READ" : ""));
if (!(file->f_mode & (FMODE_WRITE | FMODE_READ)))
- {
return -EINVAL;
- }
state = card->states[1];
- if(state)
- {
- if ( (state->open_mode & FMODE_WRITE) & (file->f_mode & FMODE_WRITE) )
- {
- CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_WRITE\n") );
+ if (state) {
+ if ((state->open_mode & FMODE_WRITE) & (file->f_mode & FMODE_WRITE)) {
+ CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_WRITE\n"));
dmabuf = &state->dmabuf;
cs_clear_tail(state);
drain_dac(state, file->f_flags & O_NONBLOCK);
@@ -3375,8 +3209,7 @@ static int cs_release(struct inode *inode, struct file *file)
state->card->states[state->virt] = NULL;
state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC, CS_FALSE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC, CS_FALSE))) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
"cs46xx: cs_release_mixdev() powerdown DAC failure (0x%x)\n",tmp) );
}
@@ -3384,17 +3217,14 @@ static int cs_release(struct inode *inode, struct file *file)
/* Now turn off external AMP if needed */
state->card->amplifier_ctrl(state->card, -1);
state->card->active_ctrl(state->card, -1);
-
kfree(state);
}
}
state = card->states[0];
- if(state)
- {
- if ( (state->open_mode & FMODE_READ) & (file->f_mode & FMODE_READ) )
- {
- CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n") );
+ if (state) {
+ if ((state->open_mode & FMODE_READ) & (file->f_mode & FMODE_READ)) {
+ CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n"));
dmabuf = &state->dmabuf;
mutex_lock(&state->open_mutex);
stop_adc(state);
@@ -3407,8 +3237,7 @@ static int cs_release(struct inode *inode, struct file *file)
state->card->states[state->virt] = NULL;
state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
- if( (tmp = cs461x_powerdown(card, CS_POWER_ADC, CS_FALSE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_ADC, CS_FALSE))) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
"cs46xx: cs_release_mixdev() powerdown ADC failure (0x%x)\n",tmp) );
}
@@ -3416,12 +3245,11 @@ static int cs_release(struct inode *inode, struct file *file)
/* Now turn off external AMP if needed */
state->card->amplifier_ctrl(state->card, -1);
state->card->active_ctrl(state->card, -1);
-
kfree(state);
}
}
- CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk("cs46xx: cs_release()- 0\n") );
+ CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk("cs46xx: cs_release()- 0\n"));
return 0;
}
@@ -3474,21 +3302,18 @@ static void cs46xx_ac97_suspend(struct cs_card *card)
CS_DBGOUT(CS_PM, 9, printk("cs46xx: cs46xx_ac97_suspend()+\n"));
- if(card->states[1])
- {
+ if (card->states[1]) {
stop_dac(card->states[1]);
resync_dma_ptrs(card->states[1]);
}
- if(card->states[0])
- {
+ if (card->states[0]) {
stop_adc(card->states[0]);
resync_dma_ptrs(card->states[0]);
}
- for(Count = 0x2, i=0; (Count <= CS46XX_AC97_HIGHESTREGTORESTORE)
- && (i < CS46XX_AC97_NUMBER_RESTORE_REGS);
- Count += 2, i++)
- {
+ for (Count = 0x2, i = 0; (Count <= CS46XX_AC97_HIGHESTREGTORESTORE)
+ && (i < CS46XX_AC97_NUMBER_RESTORE_REGS);
+ Count += 2, i++) {
card->pm.ac97[i] = cs_ac97_get(dev, BA0_AC97_RESET + Count);
}
/*
@@ -3522,11 +3347,10 @@ static void cs46xx_ac97_suspend(struct cs_card *card)
* well, for now, only power down the DAC/ADC and MIXER VREFON components.
* trouble with removing VREF.
*/
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON, CS_TRUE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON, CS_TRUE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs46xx_ac97_suspend() failure (0x%x)\n",tmp) );
+ "cs46xx: cs46xx_ac97_suspend() failure (0x%x)\n",tmp));
}
CS_DBGOUT(CS_PM, 9, printk("cs46xx: cs46xx_ac97_suspend()-\n"));
@@ -3566,16 +3390,13 @@ static void cs46xx_ac97_resume(struct cs_card *card)
* Restore just the first set of registers, from register number
* 0x02 to the register number that ulHighestRegToRestore specifies.
*/
- for( Count = 0x2, i=0;
- (Count <= CS46XX_AC97_HIGHESTREGTORESTORE)
- && (i < CS46XX_AC97_NUMBER_RESTORE_REGS);
- Count += 2, i++)
- {
+ for (Count = 0x2, i = 0; (Count <= CS46XX_AC97_HIGHESTREGTORESTORE) &&
+ (i < CS46XX_AC97_NUMBER_RESTORE_REGS); Count += 2, i++) {
cs_ac97_set(dev, (u8)(BA0_AC97_RESET + Count), (u16)card->pm.ac97[i]);
}
/* Check if we have to init the amplifier */
- if(card->amp_init)
+ if (card->amp_init)
card->amp_init(card);
CS_DBGOUT(CS_PM, 9, printk("cs46xx: cs46xx_ac97_resume()-\n"));
@@ -3585,30 +3406,27 @@ static void cs46xx_ac97_resume(struct cs_card *card)
static int cs46xx_restart_part(struct cs_card *card)
{
struct dmabuf *dmabuf;
+
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
printk( "cs46xx: cs46xx_restart_part()+\n"));
- if(card->states[1])
- {
+ if (card->states[1]) {
dmabuf = &card->states[1]->dmabuf;
dmabuf->ready = 0;
resync_dma_ptrs(card->states[1]);
cs_set_divisor(dmabuf);
- if(__prog_dmabuf(card->states[1]))
- {
+ if (__prog_dmabuf(card->states[1])) {
CS_DBGOUT(CS_PM | CS_ERROR, 1,
printk("cs46xx: cs46xx_restart_part()- (-1) prog_dmabuf() dac error\n"));
return -1;
}
cs_set_dac_rate(card->states[1], dmabuf->rate);
}
- if(card->states[0])
- {
+ if (card->states[0]) {
dmabuf = &card->states[0]->dmabuf;
dmabuf->ready = 0;
resync_dma_ptrs(card->states[0]);
cs_set_divisor(dmabuf);
- if(__prog_dmabuf(card->states[0]))
- {
+ if (__prog_dmabuf(card->states[0])) {
CS_DBGOUT(CS_PM | CS_ERROR, 1,
printk("cs46xx: cs46xx_restart_part()- (-1) prog_dmabuf() adc error\n"));
return -1;
@@ -3616,17 +3434,17 @@ static int cs46xx_restart_part(struct cs_card *card)
cs_set_adc_rate(card->states[0], dmabuf->rate);
}
card->pm.flags |= CS46XX_PM_RESUMED;
- if(card->states[0])
+ if (card->states[0])
start_adc(card->states[0]);
- if(card->states[1])
+ if (card->states[1])
start_dac(card->states[1]);
card->pm.flags |= CS46XX_PM_IDLE;
card->pm.flags &= ~(CS46XX_PM_SUSPENDING | CS46XX_PM_SUSPENDED
| CS46XX_PM_RESUMING | CS46XX_PM_RESUMED);
- if(card->states[0])
+ if (card->states[0])
wake_up(&card->states[0]->dmabuf.wait);
- if(card->states[1])
+ if (card->states[1])
wake_up(&card->states[1]->dmabuf.wait);
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
@@ -3634,20 +3452,19 @@ static int cs46xx_restart_part(struct cs_card *card)
return 0;
}
-
static void cs461x_reset(struct cs_card *card);
static void cs461x_proc_stop(struct cs_card *card);
static int cs46xx_suspend(struct cs_card *card, pm_message_t state)
{
unsigned int tmp;
+
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
printk("cs46xx: cs46xx_suspend()+ flags=0x%x s=%p\n",
(unsigned)card->pm.flags,card));
/*
* check the current state, only suspend if IDLE
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
- {
+ if (!(card->pm.flags & CS46XX_PM_IDLE)) {
CS_DBGOUT(CS_PM | CS_ERROR, 2,
printk("cs46xx: cs46xx_suspend() unable to suspend, not IDLE\n"));
return 1;
@@ -3679,13 +3496,11 @@ static int cs46xx_suspend(struct cs_card *card, pm_message_t state)
tmp = cs461x_peek(card, BA1_CCTL);
cs461x_poke(card, BA1_CCTL, tmp & 0xffff0000);
- if(card->states[1])
- {
+ if (card->states[1]) {
card->pm.dmabuf_swptr_play = card->states[1]->dmabuf.swptr;
card->pm.dmabuf_count_play = card->states[1]->dmabuf.count;
}
- if(card->states[0])
- {
+ if (card->states[0]) {
card->pm.dmabuf_swptr_capture = card->states[0]->dmabuf.swptr;
card->pm.dmabuf_count_capture = card->states[0]->dmabuf.count;
}
@@ -3736,8 +3551,7 @@ static int cs46xx_resume(struct cs_card *card)
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
printk( "cs46xx: cs46xx_resume()+ flags=0x%x\n",
(unsigned)card->pm.flags));
- if(!(card->pm.flags & CS46XX_PM_SUSPENDED))
- {
+ if (!(card->pm.flags & CS46XX_PM_SUSPENDED)) {
CS_DBGOUT(CS_PM | CS_ERROR, 2,
printk("cs46xx: cs46xx_resume() unable to resume, not SUSPENDED\n"));
return 1;
@@ -3747,10 +3561,8 @@ static int cs46xx_resume(struct cs_card *card)
printpm(card);
card->active_ctrl(card, 1);
- for(i=0;i<5;i++)
- {
- if (cs_hardware_init(card) != 0)
- {
+ for (i = 0; i < 5; i++) {
+ if (cs_hardware_init(card) != 0) {
CS_DBGOUT(CS_PM | CS_ERROR, 4, printk(
"cs46xx: cs46xx_resume()- ERROR in cs_hardware_init()\n"));
mdelay(10 * cs_laptop_wait);
@@ -3759,15 +3571,13 @@ static int cs46xx_resume(struct cs_card *card)
}
break;
}
- if(i>=4)
- {
+ if (i >= 4) {
CS_DBGOUT(CS_PM | CS_ERROR, 1, printk(
"cs46xx: cs46xx_resume()- cs_hardware_init() failed, retried %d times.\n",i));
return 0;
}
- if(cs46xx_restart_part(card))
- {
+ if (cs46xx_restart_part(card)) {
CS_DBGOUT(CS_PM | CS_ERROR, 4, printk(
"cs46xx: cs46xx_resume(): cs46xx_restart_part() returned error\n"));
}
@@ -3835,7 +3645,7 @@ static u16 _cs_ac97_get(struct ac97_codec *dev, u8 reg)
/*
* Wait for the read to occur.
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
+ if (!(card->pm.flags & CS46XX_PM_IDLE))
loopcnt = 2000;
else
loopcnt = 500 * cs_laptop_wait;
@@ -3866,7 +3676,7 @@ static u16 _cs_ac97_get(struct ac97_codec *dev, u8 reg)
* Wait for the valid status bit to go active.
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
+ if (!(card->pm.flags & CS46XX_PM_IDLE))
loopcnt = 2000;
else
loopcnt = 1000;
@@ -3885,7 +3695,7 @@ static u16 _cs_ac97_get(struct ac97_codec *dev, u8 reg)
/*
* Make sure we got valid status.
*/
- if (!( (tmp=cs461x_peekBA0(card, BA0_ACSTS)) & ACSTS_VSTS)) {
+ if (!((tmp = cs461x_peekBA0(card, BA0_ACSTS)) & ACSTS_VSTS)) {
CS_DBGOUT(CS_ERROR, 2, printk(KERN_WARNING
"cs46xx: AC'97 read problem (ACSTS_VSTS), reg = 0x%x val=0x%x 0xffff \n",
reg, tmp));
@@ -3923,12 +3733,9 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
spin_lock(&card->ac97_lock);
- if(reg == AC97_CD_VOL)
- {
+ if (reg == AC97_CD_VOL)
val2 = _cs_ac97_get(dev, AC97_CD_VOL);
- }
-
-
+
/*
* 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address
* 2. Write ACCDA = Command Data Register = 470h for data to write to AC97
@@ -3970,8 +3777,7 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
/*
* Make sure the write completed.
*/
- if (cs461x_peekBA0(card, BA0_ACCTL) & ACCTL_DCV)
- {
+ if (cs461x_peekBA0(card, BA0_ACCTL) & ACCTL_DCV) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: AC'97 write problem, reg = 0x%x, val = 0x%x\n", reg, val));
}
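/*
 * [Editorial sketch - not part of this patch] The numbered comment earlier
 * in this function describes the codec write handshake: put the AC97
 * register address in ACCAD, the value in ACCDA, start the transaction
 * through ACCTL and then poll until the DCV bit drops, which is what the
 * completion check just above tests for. Roughly, and assuming the
 * BA0_ACCAD/BA0_ACCDA names match the other BA0_* constants used in this
 * file, the sequence looks like:
 */
static void ac97_write_sketch(struct cs_card *card, u8 reg, u16 val)
{
	int spin = 1000;

	cs461x_pokeBA0(card, BA0_ACCAD, reg);	/* 1. command address (0x46c) */
	cs461x_pokeBA0(card, BA0_ACCDA, val);	/* 2. command data    (0x470) */
	cs461x_pokeBA0(card, BA0_ACCTL,		/* 3. kick off the write      */
		       ACCTL_DCV | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN);
	while (spin-- && (cs461x_peekBA0(card, BA0_ACCTL) & ACCTL_DCV))
		udelay(10);			/* 4. wait for DCV to clear   */
}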
@@ -3998,25 +3804,23 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
/* CD mute change ? */
- if(reg==AC97_CD_VOL)
- {
+ if (reg == AC97_CD_VOL) {
/* Mute bit change ? */
- if((val2^val)&0x8000 || ((val2 == 0x1f1f || val == 0x1f1f) && val2 != val))
- {
+ if ((val2 ^ val) & 0x8000 ||
+ ((val2 == 0x1f1f || val == 0x1f1f) && val2 != val)) {
/* This is a hack but its cleaner than the alternatives.
Right now card->ac97_codec[0] might be NULL as we are
still doing codec setup. This does an early assignment
to avoid the problem if it occurs */
- if(card->ac97_codec[0]==NULL)
- card->ac97_codec[0]=dev;
+ if (card->ac97_codec[0] == NULL)
+ card->ac97_codec[0] = dev;
/* Mute on */
- if(val&0x8000 || val == 0x1f1f)
+ if (val & 0x8000 || val == 0x1f1f)
card->amplifier_ctrl(card, -1);
- else /* Mute off power on */
- {
- if(card->amp_init)
+ else { /* Mute off power on */
+ if (card->amp_init)
card->amp_init(card);
card->amplifier_ctrl(card, 1);
}
@@ -4024,46 +3828,41 @@ static void cs_ac97_set(struct ac97_codec *dev, u8 reg, u16 val)
}
}
-
/* OSS /dev/mixer file operation methods */
static int cs_open_mixdev(struct inode *inode, struct file *file)
{
- int i=0;
+ int i = 0;
unsigned int minor = iminor(inode);
- struct cs_card *card=NULL;
+ struct cs_card *card = NULL;
struct list_head *entry;
unsigned int tmp;
CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
printk(KERN_INFO "cs46xx: cs_open_mixdev()+\n"));
- list_for_each(entry, &cs46xx_devs)
- {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
for (i = 0; i < NR_AC97; i++)
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor)
goto match;
}
- if (!card)
- {
+ if (!card) {
CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
printk(KERN_INFO "cs46xx: cs46xx_open_mixdev()- -ENODEV\n"));
return -ENODEV;
}
match:
- if(!card->ac97_codec[i])
+ if (!card->ac97_codec[i])
return -ENODEV;
file->private_data = card->ac97_codec[i];
card->active_ctrl(card,1);
- if(!CS_IN_USE(&card->mixer_use_cnt))
- {
- if( (tmp = cs46xx_powerup(card, CS_POWER_MIXVON )) )
- {
+ if (!CS_IN_USE(&card->mixer_use_cnt)) {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_MIXVON))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs_open_mixdev() powerup failure (0x%x)\n",tmp) );
+ "cs46xx: cs_open_mixdev() powerup failure (0x%x)\n", tmp));
return -EIO;
}
}
@@ -4077,7 +3876,7 @@ static int cs_open_mixdev(struct inode *inode, struct file *file)
static int cs_release_mixdev(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
- struct cs_card *card=NULL;
+ struct cs_card *card = NULL;
struct list_head *entry;
int i;
unsigned int tmp;
@@ -4092,15 +3891,13 @@ static int cs_release_mixdev(struct inode *inode, struct file *file)
card->ac97_codec[i]->dev_mixer == minor)
goto match;
}
- if (!card)
- {
+ if (!card) {
CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
printk(KERN_INFO "cs46xx: cs46xx_open_mixdev()- -ENODEV\n"));
return -ENODEV;
}
match:
- if(!CS_DEC_AND_TEST(&card->mixer_use_cnt))
- {
+ if (!CS_DEC_AND_TEST(&card->mixer_use_cnt)) {
CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 4,
printk(KERN_INFO "cs46xx: cs_release_mixdev()- no powerdown, usecnt>0\n"));
card->active_ctrl(card, -1);
@@ -4110,10 +3907,9 @@ match:
/*
* ok, no outstanding mixer opens, so powerdown.
*/
- if( (tmp = cs461x_powerdown(card, CS_POWER_MIXVON, CS_FALSE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_MIXVON, CS_FALSE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs_release_mixdev() powerdown MIXVON failure (0x%x)\n",tmp) );
+ "cs46xx: cs_release_mixdev() powerdown MIXVON failure (0x%x)\n", tmp));
card->active_ctrl(card, -1);
card->amplifier_ctrl(card, -1);
return -EIO;
@@ -4126,76 +3922,60 @@ match:
}
static int cs_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
- struct ac97_codec *codec = (struct ac97_codec *)file->private_data;
- struct cs_card *card=NULL;
+ struct ac97_codec *codec = file->private_data;
+ struct cs_card *card = NULL;
struct list_head *entry;
unsigned long __user *p = (long __user *)arg;
-
#if CSDEBUG_INTERFACE
int val;
- if( (cmd == SOUND_MIXER_CS_GETDBGMASK) ||
+ if ((cmd == SOUND_MIXER_CS_GETDBGMASK) ||
(cmd == SOUND_MIXER_CS_SETDBGMASK) ||
(cmd == SOUND_MIXER_CS_GETDBGLEVEL) ||
(cmd == SOUND_MIXER_CS_SETDBGLEVEL) ||
- (cmd == SOUND_MIXER_CS_APM))
- {
- switch(cmd)
- {
-
+ (cmd == SOUND_MIXER_CS_APM)) {
+ switch (cmd) {
case SOUND_MIXER_CS_GETDBGMASK:
return put_user(cs_debugmask, p);
-
case SOUND_MIXER_CS_GETDBGLEVEL:
return put_user(cs_debuglevel, p);
-
case SOUND_MIXER_CS_SETDBGMASK:
if (get_user(val, p))
return -EFAULT;
cs_debugmask = val;
return 0;
-
case SOUND_MIXER_CS_SETDBGLEVEL:
if (get_user(val, p))
return -EFAULT;
cs_debuglevel = val;
return 0;
-
case SOUND_MIXER_CS_APM:
if (get_user(val, p))
return -EFAULT;
- if(val == CS_IOCTL_CMD_SUSPEND)
- {
- list_for_each(entry, &cs46xx_devs)
- {
+ if (val == CS_IOCTL_CMD_SUSPEND) {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
cs46xx_suspend(card, PMSG_ON);
}
- }
- else if(val == CS_IOCTL_CMD_RESUME)
- {
- list_for_each(entry, &cs46xx_devs)
- {
+ } else if (val == CS_IOCTL_CMD_RESUME) {
+ list_for_each(entry, &cs46xx_devs) {
card = list_entry(entry, struct cs_card, list);
cs46xx_resume(card);
}
- }
- else
- {
+ } else {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
"cs46xx: mixer_ioctl(): invalid APM cmd (%d)\n",
val));
}
return 0;
-
default:
CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
- "cs46xx: mixer_ioctl(): ERROR unknown debug cmd\n") );
+ "cs46xx: mixer_ioctl(): ERROR unknown debug cmd\n"));
return 0;
- }
+ }
}
#endif
return codec->mixer_ioctl(codec, cmd, arg);
@@ -4232,8 +4012,7 @@ static int __init cs_ac97_init(struct cs_card *card)
codec->codec_read = cs_ac97_get;
codec->codec_write = cs_ac97_set;
- if (ac97_probe_codec(codec) == 0)
- {
+ if (ac97_probe_codec(codec) == 0) {
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
"cs46xx: cs_ac97_init()- codec number %d not found\n",
num_ac97) );
@@ -4241,12 +4020,11 @@ static int __init cs_ac97_init(struct cs_card *card)
break;
}
CS_DBGOUT(CS_FUNCTION | CS_INIT, 2, printk(KERN_INFO
- "cs46xx: cs_ac97_init() found codec %d\n",num_ac97) );
+ "cs46xx: cs_ac97_init() found codec %d\n",num_ac97));
eid = cs_ac97_get(codec, AC97_EXTENDED_ID);
- if(eid==0xFFFF)
- {
+ if (eid == 0xFFFF) {
printk(KERN_WARNING "cs46xx: codec %d not present\n",num_ac97);
ac97_release_codec(codec);
break;
@@ -4285,27 +4063,23 @@ static void cs461x_download_image(struct cs_card *card)
{
unsigned i, j, temp1, temp2, offset, count;
unsigned char __iomem *pBA1 = ioremap(card->ba1_addr, 0x40000);
- for( i=0; i < CLEAR__COUNT; i++)
- {
+ for (i = 0; i < CLEAR__COUNT; i++) {
offset = ClrStat[i].BA1__DestByteOffset;
count = ClrStat[i].BA1__SourceSize;
- for( temp1 = offset; temp1<(offset+count); temp1+=4 )
+ for (temp1 = offset; temp1 < (offset + count); temp1 += 4)
writel(0, pBA1+temp1);
}
- for(i=0; i<FILL__COUNT; i++)
- {
+ for (i = 0; i < FILL__COUNT; i++) {
temp2 = FillStat[i].Offset;
- for(j=0; j<(FillStat[i].Size)/4; j++)
- {
+ for (j = 0; j < (FillStat[i].Size) / 4; j++) {
temp1 = (FillStat[i]).pFill[j];
- writel(temp1, pBA1+temp2+j*4);
+ writel(temp1, pBA1 + temp2 + j * 4);
}
}
iounmap(pBA1);
}
-
/*
* Chip reset
*/
@@ -4365,15 +4139,13 @@ static void cs461x_clear_serial_FIFOs(struct cs_card *card, int type)
* playing or capturing then we don't want to put in 128 bytes of
* "noise".
*/
- if(type & CS_TYPE_DAC)
- {
+ if (type & CS_TYPE_DAC) {
startfifo = 128;
endfifo = 256;
}
- if(type & CS_TYPE_ADC)
- {
+ if (type & CS_TYPE_ADC) {
startfifo = 0;
- if(!endfifo)
+ if (!endfifo)
endfifo = 128;
}
/*
@@ -4417,8 +4189,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
"cs46xx: cs461x_powerdown()+ type=0x%x\n",type));
- if(!cs_powerdown && !suspendflag)
- {
+ if (!cs_powerdown && !suspendflag) {
CS_DBGOUT(CS_FUNCTION, 8, printk(KERN_INFO
"cs46xx: cs461x_powerdown() DISABLED exiting\n"));
return 0;
@@ -4432,12 +4203,11 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* currently powered down. If powering down DAC and ADC, then
* it is possible to power down the VREF (ON).
*/
- if ( ((type & CS_POWER_MIXVON) &&
- (!(type & CS_POWER_ADC) || (!(type & CS_POWER_DAC))) )
+ if (((type & CS_POWER_MIXVON) &&
+ (!(type & CS_POWER_ADC) || (!(type & CS_POWER_DAC))))
&&
((tmp & CS_AC97_POWER_CONTROL_ADC_ON) ||
- (tmp & CS_AC97_POWER_CONTROL_DAC_ON) ) )
- {
+ (tmp & CS_AC97_POWER_CONTROL_DAC_ON))) {
CS_DBGOUT(CS_FUNCTION, 8, printk(KERN_INFO
"cs46xx: cs461x_powerdown()- 0 unable to powerdown. tmp=0x%x\n",tmp));
return 0;
@@ -4452,8 +4222,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
/*
* Power down indicated areas.
*/
- if(type & CS_POWER_MIXVOFF)
- {
+ if (type & CS_POWER_MIXVOFF) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs461x_powerdown()+ MIXVOFF\n"));
@@ -4461,12 +4230,10 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Power down the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_MIXVOFF;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4492,16 +4259,14 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVOFF_ON)
- {
+ CS_AC97_POWER_CONTROL_MIXVOFF_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown MIXVOFF failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_MIXVON)
- {
+ if (type & CS_POWER_MIXVON) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs461x_powerdown()+ MIXVON\n"));
@@ -4509,15 +4274,13 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Power down the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_MIXVON_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_MIXVON_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_MIXVON;
- cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
+ cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp);
/*
* Now, we wait until we sample a ready state.
*/
@@ -4540,30 +4303,26 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVON_ON)
- {
+ CS_AC97_POWER_CONTROL_MIXVON_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown MIXVON failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_ADC)
- {
+ if (type & CS_POWER_ADC) {
/*
* Power down the ADC on the AC97 card.
*/
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO "cs46xx: cs461x_powerdown()+ ADC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_ADC_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_ADC_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_ADC;
- cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
+ cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp);
/*
* Now, we wait until we sample a ready state.
@@ -4587,16 +4346,14 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_ADC_ON)
- {
+ CS_AC97_POWER_CONTROL_ADC_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown ADC failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_DAC)
- {
+ if (type & CS_POWER_DAC) {
/*
* Power down the DAC on the AC97 card.
*/
@@ -4604,15 +4361,13 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs461x_powerdown()+ DAC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (tmp & CS_AC97_POWER_CONTROL_DAC_ON)
- {
- if(!muted)
- {
+ if (tmp & CS_AC97_POWER_CONTROL_DAC_ON) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp |= CS_AC97_POWER_CONTROL_DAC;
- cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
+ cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp);
/*
* Now, we wait until we sample a ready state.
*/
@@ -4635,8 +4390,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
* Check the status..
*/
if (cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_DAC_ON)
- {
+ CS_AC97_POWER_CONTROL_DAC_ON) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerdown DAC failed\n"));
return 1;
@@ -4644,7 +4398,7 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
}
}
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if(muted)
+ if (muted)
cs_mute(card, CS_FALSE);
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
"cs46xx: cs461x_powerdown()- 0 tmp=0x%x\n",tmp));
@@ -4654,23 +4408,22 @@ static int cs461x_powerdown(struct cs_card *card, unsigned int type, int suspend
static int cs46xx_powerup(struct cs_card *card, unsigned int type)
{
int count;
- unsigned int tmp=0,muted=0;
+ unsigned int tmp = 0, muted = 0;
CS_DBGOUT(CS_FUNCTION, 8, printk(KERN_INFO
"cs46xx: cs46xx_powerup()+ type=0x%x\n",type));
/*
* check for VREF and powerup if need to.
*/
- if(type & CS_POWER_MIXVON)
+ if (type & CS_POWER_MIXVON)
type |= CS_POWER_MIXVOFF;
- if(type & (CS_POWER_DAC | CS_POWER_ADC))
+ if (type & (CS_POWER_DAC | CS_POWER_ADC))
type |= CS_POWER_MIXVON | CS_POWER_MIXVOFF;
/*
* Power up indicated areas.
*/
- if(type & CS_POWER_MIXVOFF)
- {
+ if (type & CS_POWER_MIXVOFF) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs46xx_powerup()+ MIXVOFF\n"));
@@ -4678,12 +4431,10 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Power up the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_MIXVOFF_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_MIXVOFF;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4709,16 +4460,14 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVOFF_ON))
- {
+ CS_AC97_POWER_CONTROL_MIXVOFF_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup MIXVOFF failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_MIXVON)
- {
+ if (type & CS_POWER_MIXVON) {
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs46xx_powerup()+ MIXVON\n"));
@@ -4726,12 +4475,10 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Power up the MIXER (VREF ON) on the AC97 card.
*/
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_MIXVON_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_MIXVON_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_MIXVON;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4757,27 +4504,23 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_MIXVON_ON))
- {
+ CS_AC97_POWER_CONTROL_MIXVON_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup MIXVON failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_ADC)
- {
+ if (type & CS_POWER_ADC) {
/*
* Power up the ADC on the AC97 card.
*/
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO "cs46xx: cs46xx_powerup()+ ADC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_ADC_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_ADC_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_ADC;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4804,16 +4547,14 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_ADC_ON))
- {
+ CS_AC97_POWER_CONTROL_ADC_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup ADC failed\n"));
return 1;
}
}
}
- if(type & CS_POWER_DAC)
- {
+ if (type & CS_POWER_DAC) {
/*
* Power up the DAC on the AC97 card.
*/
@@ -4821,12 +4562,10 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
CS_DBGOUT(CS_FUNCTION, 4,
printk(KERN_INFO "cs46xx: cs46xx_powerup()+ DAC\n"));
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if (!(tmp & CS_AC97_POWER_CONTROL_DAC_ON))
- {
- if(!muted)
- {
+ if (!(tmp & CS_AC97_POWER_CONTROL_DAC_ON)) {
+ if (!muted) {
cs_mute(card, CS_TRUE);
- muted=1;
+ muted = 1;
}
tmp &= ~CS_AC97_POWER_CONTROL_DAC;
cs_ac97_set(card->ac97_codec[0], AC97_POWER_CONTROL, tmp );
@@ -4852,8 +4591,7 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
* Check the status..
*/
if (!(cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL) &
- CS_AC97_POWER_CONTROL_DAC_ON))
- {
+ CS_AC97_POWER_CONTROL_DAC_ON)) {
CS_DBGOUT(CS_ERROR, 1, printk(KERN_WARNING
"cs46xx: powerup DAC failed\n"));
return 1;
@@ -4861,14 +4599,13 @@ static int cs46xx_powerup(struct cs_card *card, unsigned int type)
}
}
tmp = cs_ac97_get(card->ac97_codec[0], AC97_POWER_CONTROL);
- if(muted)
+ if (muted)
cs_mute(card, CS_FALSE);
CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
"cs46xx: cs46xx_powerup()- 0 tmp=0x%x\n",tmp));
return 0;
}
-
static void cs461x_proc_start(struct cs_card *card)
{
int cnt;
@@ -4965,7 +4702,7 @@ static int cs_hardware_init(struct cs_card *card)
* is not enough for some platforms! tested on an IBM Thinkpads and
* reference cards.
*/
- if(!(card->pm.flags & CS46XX_PM_IDLE))
+ if (!(card->pm.flags & CS46XX_PM_IDLE))
mdelay(initdelay);
/*
* Write the selected clock control setup to the hardware. Do not turn on
@@ -5017,8 +4754,7 @@ static int cs_hardware_init(struct cs_card *card)
* If we are resuming under 2.2.x then we can not schedule a timeout.
* so, just spin the CPU.
*/
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
/*
* Wait for the card ready signal from the AC97 card.
*/
@@ -5033,9 +4769,7 @@ static int cs_hardware_init(struct cs_card *card)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(1);
} while (time_before(jiffies, end_time));
- }
- else
- {
+ } else {
for (count = 0; count < 100; count++) {
// First, we want to wait for a short time.
udelay(25 * cs_laptop_wait);
@@ -5064,8 +4798,7 @@ static int cs_hardware_init(struct cs_card *card)
*/
cs461x_pokeBA0(card, BA0_ACCTL, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN);
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
/*
* Wait until we've sampled input slots 3 and 4 as valid, meaning that
* the card is pumping ADC data across the AC-link.
@@ -5081,9 +4814,7 @@ static int cs_hardware_init(struct cs_card *card)
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(1);
} while (time_before(jiffies, end_time));
- }
- else
- {
+ } else {
for (count = 0; count < 100; count++) {
// First, we want to wait for a short time.
udelay(25 * cs_laptop_wait);
@@ -5140,17 +4871,13 @@ static int cs_hardware_init(struct cs_card *card)
cs461x_poke(card, BA1_CCTL, tmp & 0xffff0000);
/* initialize AC97 codec and register /dev/mixer */
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
- if (cs_ac97_init(card) <= 0)
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
+ if (cs_ac97_init(card) <= 0) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
- "cs46xx: cs_ac97_init() failure\n") );
+ "cs46xx: cs_ac97_init() failure\n"));
return -EIO;
}
- }
- else
- {
+ } else {
cs46xx_ac97_resume(card);
}
@@ -5174,23 +4901,17 @@ static int cs_hardware_init(struct cs_card *card)
* If IDLE then Power down the part. We will power components up
* when we need them.
*/
- if(card->pm.flags & CS46XX_PM_IDLE)
- {
- if(!cs_powerdown)
- {
- if( (tmp = cs46xx_powerup(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON )) )
- {
+ if (card->pm.flags & CS46XX_PM_IDLE) {
+ if (!cs_powerdown) {
+ if ((tmp = cs46xx_powerup(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
"cs46xx: cs461x_powerup() failure (0x%x)\n",tmp) );
return -EIO;
}
- }
- else
- {
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON, CS_FALSE )) )
- {
+ } else {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON, CS_FALSE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
"cs46xx: cs461x_powerdown() failure (0x%x)\n",tmp) );
return -EIO;
@@ -5310,14 +5031,13 @@ MODULE_AUTHOR("Alan Cox <alan@redhat.com>, Jaroslav Kysela, <pcaudio@crystal.cir
MODULE_DESCRIPTION("Crystal SoundFusion Audio Support");
MODULE_LICENSE("GPL");
-
static const char cs46xx_banner[] = KERN_INFO "Crystal 4280/46xx + AC97 Audio, version " CS46XX_MAJOR_VERSION "." CS46XX_MINOR_VERSION "." CS46XX_ARCH ", " __TIME__ " " __DATE__ "\n";
static const char fndmsg[] = KERN_INFO "cs46xx: Found %d audio device(s).\n";
static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
const struct pci_device_id *pciid)
{
- int i,j;
+ int i, j;
u16 ss_card, ss_vendor;
struct cs_card *card;
dma_addr_t dma_mask;
@@ -5378,42 +5098,35 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
while (cp->name)
{
- if(cp->vendor == ss_vendor && cp->id == ss_card)
- {
+ if (cp->vendor == ss_vendor && cp->id == ss_card) {
card->amplifier_ctrl = cp->amp;
- if(cp->active)
+ if (cp->active)
card->active_ctrl = cp->active;
- if(cp->amp_init)
+ if (cp->amp_init)
card->amp_init = cp->amp_init;
break;
}
cp++;
}
- if (cp->name==NULL)
- {
+ if (cp->name == NULL) {
printk(KERN_INFO "cs46xx: Unknown card (%04X:%04X) at 0x%08lx/0x%08lx, IRQ %d\n",
ss_vendor, ss_card, card->ba0_addr, card->ba1_addr, card->irq);
- }
- else
- {
+ } else {
printk(KERN_INFO "cs46xx: %s (%04X:%04X) at 0x%08lx/0x%08lx, IRQ %d\n",
cp->name, ss_vendor, ss_card, card->ba0_addr, card->ba1_addr, card->irq);
}
- if (card->amplifier_ctrl==NULL)
- {
+ if (card->amplifier_ctrl == NULL) {
card->amplifier_ctrl = amp_none;
card->active_ctrl = clkrun_hack;
}
- if (external_amp == 1)
- {
+ if (external_amp == 1) {
printk(KERN_INFO "cs46xx: Crystal EAPD support forced on.\n");
card->amplifier_ctrl = amp_voyetra;
}
- if (thinkpad == 1)
- {
+ if (thinkpad == 1) {
printk(KERN_INFO "cs46xx: Activating CLKRUN hack for Thinkpad.\n");
card->active_ctrl = clkrun_hack;
}
@@ -5425,13 +5138,11 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
* and mdelay kernel code is replaced by a pm timer, or the delays
* work well for battery and/or AC power both.
*/
- if(card->active_ctrl == clkrun_hack)
- {
+ if (card->active_ctrl == clkrun_hack) {
initdelay = 2100;
cs_laptop_wait = 5;
}
- if((card->active_ctrl == clkrun_hack) && !(powerdown == 1))
- {
+ if ((card->active_ctrl == clkrun_hack) && !(powerdown == 1)) {
/*
* for some currently unknown reason, powering down the DAC and ADC component
* blocks on thinkpads causes some funky behavior... distoorrrtion and ac97
@@ -5440,7 +5151,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
*/
cs_powerdown = 0;
}
- if(powerdown == 0)
+ if (powerdown == 0)
cs_powerdown = 0;
card->active_ctrl(card, 1);
@@ -5461,7 +5172,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
card->ba1.name.pmem,
card->ba1.name.reg) );
- if(card->ba0 == 0 || card->ba1.name.data0 == 0 ||
+ if (card->ba0 == 0 || card->ba1.name.data0 == 0 ||
card->ba1.name.data1 == 0 || card->ba1.name.pmem == 0 ||
card->ba1.name.reg == 0)
goto fail2;
@@ -5477,14 +5188,12 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
}
/* register /dev/midi */
- if((card->dev_midi = register_sound_midi(&cs_midi_fops, -1)) < 0)
+ if ((card->dev_midi = register_sound_midi(&cs_midi_fops, -1)) < 0)
printk(KERN_ERR "cs46xx: unable to register midi\n");
card->pm.flags |= CS46XX_PM_IDLE;
- for(i=0;i<5;i++)
- {
- if (cs_hardware_init(card) != 0)
- {
+ for (i = 0; i < 5; i++) {
+ if (cs_hardware_init(card) != 0) {
CS_DBGOUT(CS_ERROR, 4, printk(
"cs46xx: ERROR in cs_hardware_init()... retrying\n"));
for (j = 0; j < NR_AC97; j++)
@@ -5497,12 +5206,11 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
}
break;
}
- if(i>=4)
- {
+ if (i >= 4) {
CS_DBGOUT(CS_PM | CS_ERROR, 1, printk(
"cs46xx: cs46xx_probe()- cs_hardware_init() failed, retried %d times.\n",i));
unregister_sound_dsp(card->dev_audio);
- if(card->dev_midi)
+ if (card->dev_midi)
unregister_sound_midi(card->dev_midi);
goto fail;
}
@@ -5518,7 +5226,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
* Check if we have to init the amplifier, but probably already done
* since the CD logic in the ac97 init code will turn on the ext amp.
*/
- if(cp->amp_init)
+ if (cp->amp_init)
cp->amp_init(card);
card->active_ctrl(card, -1);
@@ -5536,15 +5244,15 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
fail:
free_irq(card->irq, card);
fail2:
- if(card->ba0)
+ if (card->ba0)
iounmap(card->ba0);
- if(card->ba1.name.data0)
+ if (card->ba1.name.data0)
iounmap(card->ba1.name.data0);
- if(card->ba1.name.data1)
+ if (card->ba1.name.data1)
iounmap(card->ba1.name.data1);
- if(card->ba1.name.pmem)
+ if (card->ba1.name.pmem)
iounmap(card->ba1.name.pmem);
- if(card->ba1.name.reg)
+ if (card->ba1.name.reg)
iounmap(card->ba1.name.reg);
kfree(card);
CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_INFO
@@ -5598,9 +5306,8 @@ static void __devexit cs46xx_remove(struct pci_dev *pci_dev)
* Power down the DAC and ADC. We will power them up (if) when we need
* them.
*/
- if( (tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
- CS_POWER_MIXVON, CS_TRUE )) )
- {
+ if ((tmp = cs461x_powerdown(card, CS_POWER_DAC | CS_POWER_ADC |
+ CS_POWER_MIXVON, CS_TRUE))) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_INFO
"cs46xx: cs461x_powerdown() failure (0x%x)\n",tmp) );
}
@@ -5634,7 +5341,7 @@ static void __devexit cs46xx_remove(struct pci_dev *pci_dev)
ac97_release_codec(card->ac97_codec[i]);
}
unregister_sound_dsp(card->dev_audio);
- if(card->dev_midi)
+ if (card->dev_midi)
unregister_sound_midi(card->dev_midi);
list_del(&card->list);
kfree(card);
@@ -5693,8 +5400,7 @@ static int __init cs46xx_init_module(void)
"cs46xx: cs46xx_init_module()+ \n"));
rtn = pci_register_driver(&cs46xx_pci_driver);
- if(rtn == -ENODEV)
- {
+ if (rtn == -ENODEV) {
CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(
"cs46xx: Unable to detect valid cs46xx device\n"));
}