Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/Kbuild10
-rw-r--r--include/linux/acpi.h39
-rw-r--r--include/linux/adfs_fs.h13
-rw-r--r--include/linux/adfs_fs_i.h24
-rw-r--r--include/linux/adfs_fs_sb.h38
-rw-r--r--include/linux/agp_backend.h2
-rw-r--r--include/linux/agpgart.h14
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/amba/pl022.h264
-rw-r--r--include/linux/amba/pl061.h15
-rw-r--r--include/linux/amba/serial.h4
-rw-r--r--include/linux/arcdevice.h9
-rw-r--r--include/linux/async_tx.h9
-rw-r--r--include/linux/ata.h91
-rw-r--r--include/linux/ath9k_platform.h28
-rw-r--r--include/linux/atmel-mci.h2
-rw-r--r--include/linux/atmlec.h5
-rw-r--r--include/linux/atmmpc.h45
-rw-r--r--include/linux/audit.h9
-rw-r--r--include/linux/auto_dev-ioctl.h7
-rw-r--r--include/linux/auto_fs.h7
-rw-r--r--include/linux/backing-dev.h12
-rw-r--r--include/linux/binfmts.h17
-rw-r--r--include/linux/bio.h160
-rw-r--r--include/linux/bitops.h19
-rw-r--r--include/linux/blkdev.h309
-rw-r--r--include/linux/blktrace_api.h50
-rw-r--r--include/linux/bootmem.h42
-rw-r--r--include/linux/bsg.h10
-rw-r--r--include/linux/buffer_head.h24
-rw-r--r--include/linux/bug.h12
-rw-r--r--include/linux/c2port.h3
-rw-r--r--include/linux/can/Kbuild1
-rw-r--r--include/linux/can/dev.h70
-rw-r--r--include/linux/can/netlink.h113
-rw-r--r--include/linux/can/platform/sja1000.h35
-rw-r--r--include/linux/capability.h23
-rw-r--r--include/linux/cb710.h231
-rw-r--r--include/linux/cdev.h2
-rw-r--r--include/linux/cgroup.h142
-rw-r--r--include/linux/clk.h30
-rw-r--r--include/linux/clockchips.h9
-rw-r--r--include/linux/clocksource.h146
-rw-r--r--include/linux/cm4000_cs.h10
-rw-r--r--include/linux/cn_proc.h20
-rw-r--r--include/linux/coda_psdev.h15
-rw-r--r--include/linux/com20020.h1
-rw-r--r--include/linux/compat.h16
-rw-r--r--include/linux/compiler-gcc3.h6
-rw-r--r--include/linux/compiler-gcc4.h6
-rw-r--r--include/linux/compiler.h18
-rw-r--r--include/linux/connector.h12
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/cpu.h21
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/cpuset.h50
-rw-r--r--include/linux/cramfs_fs.h3
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/crypto.h4
-rw-r--r--include/linux/cyclades.h43
-rw-r--r--include/linux/dcache.h13
-rw-r--r--include/linux/dccp.h51
-rw-r--r--include/linux/debug_locks.h8
-rw-r--r--include/linux/debugfs.h8
-rw-r--r--include/linux/decompress/bunzip2.h10
-rw-r--r--include/linux/decompress/generic.h33
-rw-r--r--include/linux/decompress/inflate.h13
-rw-r--r--include/linux/decompress/mm.h87
-rw-r--r--include/linux/decompress/unlzma.h12
-rw-r--r--include/linux/device-mapper.h6
-rw-r--r--include/linux/device.h40
-rw-r--r--include/linux/dlm.h4
-rw-r--r--include/linux/dlm_netlink.h18
-rw-r--r--include/linux/dm-dirty-log.h13
-rw-r--r--include/linux/dm-ioctl.h42
-rw-r--r--include/linux/dma-debug.h181
-rw-r--r--include/linux/dma-mapping.h93
-rw-r--r--include/linux/dma_remapping.h1
-rw-r--r--include/linux/dmaengine.h36
-rw-r--r--include/linux/dmar.h66
-rw-r--r--include/linux/dmi.h7
-rw-r--r--include/linux/dnotify.h29
-rw-r--r--include/linux/ds1wm.h12
-rw-r--r--include/linux/dst.h587
-rw-r--r--include/linux/dvb/audio.h2
-rw-r--r--include/linux/dvb/video.h24
-rw-r--r--include/linux/dw_dmac.h19
-rw-r--r--include/linux/dynamic_debug.h88
-rw-r--r--include/linux/dynamic_printk.h93
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/eisa.h4
-rw-r--r--include/linux/elevator.h5
-rw-r--r--include/linux/elfcore.h9
-rw-r--r--include/linux/errqueue.h1
-rw-r--r--include/linux/etherdevice.h48
-rw-r--r--include/linux/ethtool.h112
-rw-r--r--include/linux/eventfd.h12
-rw-r--r--include/linux/eventpoll.h1
-rw-r--r--include/linux/ext3_fs.h6
-rw-r--r--include/linux/fb.h33
-rw-r--r--include/linux/fdtable.h4
-rw-r--r--include/linux/fiemap.h2
-rw-r--r--include/linux/firmware-map.h12
-rw-r--r--include/linux/firmware.h1
-rw-r--r--include/linux/fs.h393
-rw-r--r--include/linux/fs_enet_pd.h6
-rw-r--r--include/linux/fs_struct.h7
-rw-r--r--include/linux/fscache-cache.h505
-rw-r--r--include/linux/fscache.h618
-rw-r--r--include/linux/fsl_devices.h33
-rw-r--r--include/linux/fsnotify.h199
-rw-r--r--include/linux/fsnotify_backend.h387
-rw-r--r--include/linux/ftrace.h258
-rw-r--r--include/linux/ftrace_event.h172
-rw-r--r--include/linux/ftrace_irq.h2
-rw-r--r--include/linux/fuse.h31
-rw-r--r--include/linux/futex.h6
-rw-r--r--include/linux/gameport.h3
-rw-r--r--include/linux/gcd.h8
-rw-r--r--include/linux/gen_stats.h2
-rw-r--r--include/linux/genhd.h10
-rw-r--r--include/linux/gfp.h175
-rw-r--r--include/linux/hardirq.h75
-rw-r--r--include/linux/hdlc.h5
-rw-r--r--include/linux/hdlcdrv.h3
-rw-r--r--include/linux/hdreg.h66
-rw-r--r--include/linux/hid.h59
-rw-r--r--include/linux/highmem.h21
-rw-r--r--include/linux/hrtimer.h12
-rw-r--r--include/linux/hugetlb.h7
-rw-r--r--include/linux/i2c-algo-pca.h33
-rw-r--r--include/linux/i2c-algo-sgi.h26
-rw-r--r--include/linux/i2c-id.h38
-rw-r--r--include/linux/i2c-ocores.h2
-rw-r--r--include/linux/i2c-pca-platform.h2
-rw-r--r--include/linux/i2c.h74
-rw-r--r--include/linux/i2c/at24.h4
-rw-r--r--include/linux/i2c/lm8323.h46
-rw-r--r--include/linux/i2c/pca953x.h1
-rw-r--r--include/linux/i2c/s6000.h10
-rw-r--r--include/linux/i2c/twl4030.h47
-rw-r--r--include/linux/i7300_idle.h20
-rw-r--r--include/linux/ibmtr.h2
-rw-r--r--include/linux/ide.h534
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/ieee80211.h255
-rw-r--r--include/linux/if.h4
-rw-r--r--include/linux/if_arcnet.h27
-rw-r--r--include/linux/if_arp.h2
-rw-r--r--include/linux/if_ether.h11
-rw-r--r--include/linux/if_frad.h1
-rw-r--r--include/linux/if_packet.h21
-rw-r--r--include/linux/if_pppol2tp.h2
-rw-r--r--include/linux/if_pppox.h20
-rw-r--r--include/linux/if_tun.h3
-rw-r--r--include/linux/if_tunnel.h18
-rw-r--r--include/linux/if_vlan.h6
-rw-r--r--include/linux/ima.h64
-rw-r--r--include/linux/in.h1
-rw-r--r--include/linux/in6.h2
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/init.h17
-rw-r--r--include/linux/init_task.h43
-rw-r--r--include/linux/input.h28
-rw-r--r--include/linux/intel-iommu.h27
-rw-r--r--include/linux/interrupt.h130
-rw-r--r--include/linux/iocontext.h6
-rw-r--r--include/linux/iommu.h13
-rw-r--r--include/linux/ioport.h2
-rw-r--r--include/linux/ip_vs.h26
-rw-r--r--include/linux/ipc_namespace.h68
-rw-r--r--include/linux/ipmi.h2
-rw-r--r--include/linux/ipmi_msgdefs.h8
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/ipv6.h6
-rw-r--r--include/linux/irq.h154
-rw-r--r--include/linux/irqflags.h8
-rw-r--r--include/linux/irqnr.h9
-rw-r--r--include/linux/irqreturn.h30
-rw-r--r--include/linux/isdn/capilli.h2
-rw-r--r--include/linux/ivtv.h10
-rw-r--r--include/linux/ivtvfb.h2
-rw-r--r--include/linux/jbd.h10
-rw-r--r--include/linux/jbd2.h15
-rw-r--r--include/linux/jffs2.h27
-rw-r--r--include/linux/kallsyms.h15
-rw-r--r--include/linux/kernel.h190
-rw-r--r--include/linux/kernel_stat.h30
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/keyboard.h1
-rw-r--r--include/linux/kmemcheck.h153
-rw-r--r--include/linux/kmemleak.h96
-rw-r--r--include/linux/kmemtrace.h25
-rw-r--r--include/linux/kmod.h11
-rw-r--r--include/linux/kobject.h3
-rw-r--r--include/linux/kprobes.h74
-rw-r--r--include/linux/kvm.h161
-rw-r--r--include/linux/kvm_host.h83
-rw-r--r--include/linux/kvm_types.h36
-rw-r--r--include/linux/latencytop.h10
-rw-r--r--include/linux/leds-bd2802.h26
-rw-r--r--include/linux/leds.h4
-rw-r--r--include/linux/leds_pwm.h21
-rw-r--r--include/linux/lguest.h4
-rw-r--r--include/linux/lguest_launcher.h3
-rw-r--r--include/linux/libata.h18
-rw-r--r--include/linux/linux_logo.h16
-rw-r--r--include/linux/lis3lv02d.h39
-rw-r--r--include/linux/list_nulls.h18
-rw-r--r--include/linux/lm_interface.h277
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lockd/xdr.h12
-rw-r--r--include/linux/lockd/xdr4.h10
-rw-r--r--include/linux/lockdep.h67
-rw-r--r--include/linux/loop.h4
-rw-r--r--include/linux/lsm_audit.h111
-rw-r--r--include/linux/mISDNdsp.h4
-rw-r--r--include/linux/mISDNhw.h12
-rw-r--r--include/linux/mISDNif.h19
-rw-r--r--include/linux/magic.h4
-rw-r--r--include/linux/major.h2
-rw-r--r--include/linux/maple.h62
-rw-r--r--include/linux/matroxfb.h2
-rw-r--r--include/linux/mdio.h356
-rw-r--r--include/linux/memcontrol.h40
-rw-r--r--include/linux/memory.h17
-rw-r--r--include/linux/mfd/ab3100.h103
-rw-r--r--include/linux/mfd/asic3.h236
-rw-r--r--include/linux/mfd/ds1wm.h6
-rw-r--r--include/linux/mfd/ezx-pcap.h256
-rw-r--r--include/linux/mfd/htc-pasic3.h1
-rw-r--r--include/linux/mfd/pcf50633/core.h2
-rw-r--r--include/linux/mfd/pcf50633/mbc.h1
-rw-r--r--include/linux/mfd/tmio.h7
-rw-r--r--include/linux/mfd/wm8350/audio.h1
-rw-r--r--include/linux/mfd/wm8350/core.h2
-rw-r--r--include/linux/mfd/wm8400-audio.h1
-rw-r--r--include/linux/mg_disk.h45
-rw-r--r--include/linux/mii.h34
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cmd.h1
-rw-r--r--include/linux/mlx4/device.h7
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/mm.h57
-rw-r--r--include/linux/mm_types.h14
-rw-r--r--include/linux/mman.h9
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/sdio_ids.h10
-rw-r--r--include/linux/mmiotrace.h80
-rw-r--r--include/linux/mmzone.h76
-rw-r--r--include/linux/mnt_namespace.h12
-rw-r--r--include/linux/mod_devicetable.h9
-rw-r--r--include/linux/module.h100
-rw-r--r--include/linux/moduleparam.h50
-rw-r--r--include/linux/mount.h26
-rw-r--r--include/linux/mpage.h10
-rw-r--r--include/linux/mroute.h18
-rw-r--r--include/linux/mroute6.h2
-rw-r--r--include/linux/msi.h13
-rw-r--r--include/linux/mtd/mtd.h21
-rw-r--r--include/linux/mtd/nand.h20
-rw-r--r--include/linux/mtd/onenand.h24
-rw-r--r--include/linux/mtd/onenand_regs.h20
-rw-r--r--include/linux/mtd/partitions.h13
-rw-r--r--include/linux/mtd/ubi.h37
-rw-r--r--include/linux/mutex.h6
-rw-r--r--include/linux/namei.h6
-rw-r--r--include/linux/ncp_fs.h2
-rw-r--r--include/linux/ncp_no.h26
-rw-r--r--include/linux/neighbour.h1
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/net_dropmon.h65
-rw-r--r--include/linux/net_tstamp.h104
-rw-r--r--include/linux/netdevice.h355
-rw-r--r--include/linux/netfilter/Kbuild9
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h69
-rw-r--r--include/linux/netfilter/nf_conntrack_tcp.h13
-rw-r--r--include/linux/netfilter/nfnetlink.h10
-rw-r--r--include/linux/netfilter/nfnetlink_compat.h7
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h2
-rw-r--r--include/linux/netfilter/nfnetlink_log.h32
-rw-r--r--include/linux/netfilter/nfnetlink_queue.h24
-rw-r--r--include/linux/netfilter/x_tables.h141
-rw-r--r--include/linux/netfilter/xt_CLASSIFY.h4
-rw-r--r--include/linux/netfilter/xt_CONNMARK.h8
-rw-r--r--include/linux/netfilter/xt_CONNSECMARK.h4
-rw-r--r--include/linux/netfilter/xt_DSCP.h7
-rw-r--r--include/linux/netfilter/xt_LED.h15
-rw-r--r--include/linux/netfilter/xt_MARK.h6
-rw-r--r--include/linux/netfilter/xt_NFLOG.h12
-rw-r--r--include/linux/netfilter/xt_NFQUEUE.h9
-rw-r--r--include/linux/netfilter/xt_RATEEST.h6
-rw-r--r--include/linux/netfilter/xt_SECMARK.h6
-rw-r--r--include/linux/netfilter/xt_TCPMSS.h4
-rw-r--r--include/linux/netfilter/xt_cluster.h17
-rw-r--r--include/linux/netfilter/xt_connbytes.h6
-rw-r--r--include/linux/netfilter/xt_connmark.h8
-rw-r--r--include/linux/netfilter/xt_conntrack.h12
-rw-r--r--include/linux/netfilter/xt_dccp.h14
-rw-r--r--include/linux/netfilter/xt_dscp.h12
-rw-r--r--include/linux/netfilter/xt_esp.h6
-rw-r--r--include/linux/netfilter/xt_hashlimit.h32
-rw-r--r--include/linux/netfilter/xt_iprange.h4
-rw-r--r--include/linux/netfilter/xt_length.h6
-rw-r--r--include/linux/netfilter/xt_limit.h17
-rw-r--r--include/linux/netfilter/xt_mark.h8
-rw-r--r--include/linux/netfilter/xt_multiport.h18
-rw-r--r--include/linux/netfilter/xt_osf.h133
-rw-r--r--include/linux/netfilter/xt_owner.h8
-rw-r--r--include/linux/netfilter/xt_physdev.h6
-rw-r--r--include/linux/netfilter/xt_policy.h14
-rw-r--r--include/linux/netfilter/xt_quota.h4
-rw-r--r--include/linux/netfilter/xt_rateest.h14
-rw-r--r--include/linux/netfilter/xt_realm.h8
-rw-r--r--include/linux/netfilter/xt_recent.h12
-rw-r--r--include/linux/netfilter/xt_sctp.h36
-rw-r--r--include/linux/netfilter/xt_socket.h12
-rw-r--r--include/linux/netfilter/xt_statistic.h19
-rw-r--r--include/linux/netfilter/xt_string.h12
-rw-r--r--include/linux/netfilter/xt_tcpmss.h6
-rw-r--r--include/linux/netfilter/xt_tcpudp.h20
-rw-r--r--include/linux/netfilter_bridge.h4
-rw-r--r--include/linux/netfilter_ipv4/ipt_owner.h8
-rw-r--r--include/linux/netfilter_ipv6/Kbuild1
-rw-r--r--include/linux/netfilter_ipv6/ip6t_owner.h8
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/netpoll.h13
-rw-r--r--include/linux/nfs.h6
-rw-r--r--include/linux/nfs2.h7
-rw-r--r--include/linux/nfs3.h5
-rw-r--r--include/linux/nfs4.h156
-rw-r--r--include/linux/nfs_fs.h19
-rw-r--r--include/linux/nfs_fs_sb.h81
-rw-r--r--include/linux/nfs_iostat.h12
-rw-r--r--include/linux/nfs_xdr.h232
-rw-r--r--include/linux/nfsd/cache.h11
-rw-r--r--include/linux/nfsd/export.h6
-rw-r--r--include/linux/nfsd/nfsd.h225
-rw-r--r--include/linux/nfsd/nfsfh.h14
-rw-r--r--include/linux/nfsd/state.h115
-rw-r--r--include/linux/nfsd/stats.h9
-rw-r--r--include/linux/nfsd/xdr4.h135
-rw-r--r--include/linux/nilfs2_fs.h801
-rw-r--r--include/linux/nl80211.h433
-rw-r--r--include/linux/nl802154.h119
-rw-r--r--include/linux/nls.h54
-rw-r--r--include/linux/nodemask.h19
-rw-r--r--include/linux/notifier.h1
-rw-r--r--include/linux/nsproxy.h1
-rw-r--r--include/linux/nubus.h2
-rw-r--r--include/linux/of.h3
-rw-r--r--include/linux/of_mdio.h22
-rw-r--r--include/linux/of_platform.h10
-rw-r--r--include/linux/page-debug-flags.h30
-rw-r--r--include/linux/page-flags.h58
-rw-r--r--include/linux/page_cgroup.h31
-rw-r--r--include/linux/pagemap.h25
-rw-r--r--include/linux/pagevec.h1
-rw-r--r--include/linux/parport.h4
-rw-r--r--include/linux/parport_pc.h11
-rw-r--r--include/linux/pci-acpi.h67
-rw-r--r--include/linux/pci.h110
-rw-r--r--include/linux/pci_hotplug.h23
-rw-r--r--include/linux/pci_ids.h117
-rw-r--r--include/linux/pci_regs.h42
-rw-r--r--include/linux/pcieport_if.h36
-rw-r--r--include/linux/pda_power.h2
-rw-r--r--include/linux/percpu-defs.h84
-rw-r--r--include/linux/percpu.h196
-rw-r--r--include/linux/perf_counter.h728
-rw-r--r--include/linux/pfkeyv2.h242
-rw-r--r--include/linux/phy.h19
-rw-r--r--include/linux/pim.h4
-rw-r--r--include/linux/pipe_fs_i.h6
-rw-r--r--include/linux/pktcdvd.h1
-rw-r--r--include/linux/platform_device.h52
-rw-r--r--include/linux/plist.h9
-rw-r--r--include/linux/pm.h22
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/poll.h3
-rw-r--r--include/linux/power_supply.h2
-rw-r--r--include/linux/ppp_channel.h6
-rw-r--r--include/linux/ppp_defs.h4
-rw-r--r--include/linux/pps.h122
-rw-r--r--include/linux/pps_kernel.h89
-rw-r--r--include/linux/prctl.h3
-rw-r--r--include/linux/proc_fs.h28
-rw-r--r--include/linux/ptrace.h12
-rw-r--r--include/linux/pwm.h2
-rw-r--r--include/linux/qnx4_fs.h61
-rw-r--r--include/linux/quota.h11
-rw-r--r--include/linux/quotaops.h139
-rw-r--r--include/linux/radix-tree.h2
-rw-r--r--include/linux/raid/bitmap.h288
-rw-r--r--include/linux/raid/linear.h31
-rw-r--r--include/linux/raid/md.h81
-rw-r--r--include/linux/raid/md_k.h402
-rw-r--r--include/linux/raid/md_p.h2
-rw-r--r--include/linux/raid/md_u.h35
-rw-r--r--include/linux/raid/multipath.h42
-rw-r--r--include/linux/raid/pq.h132
-rw-r--r--include/linux/raid/raid0.h30
-rw-r--r--include/linux/raid/raid1.h134
-rw-r--r--include/linux/raid/raid10.h123
-rw-r--r--include/linux/raid/raid5.h402
-rw-r--r--include/linux/raid/xor.h2
-rw-r--r--include/linux/rational.h19
-rw-r--r--include/linux/rcuclassic.h16
-rw-r--r--include/linux/rculist.h30
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/rcupreempt.h53
-rw-r--r--include/linux/rcutree.h37
-rw-r--r--include/linux/rds.h250
-rw-r--r--include/linux/regulator/bq24022.h3
-rw-r--r--include/linux/regulator/consumer.h6
-rw-r--r--include/linux/regulator/driver.h82
-rw-r--r--include/linux/regulator/fixed.h3
-rw-r--r--include/linux/regulator/lp3971.h51
-rw-r--r--include/linux/regulator/machine.h12
-rw-r--r--include/linux/regulator/max1586.h63
-rw-r--r--include/linux/regulator/userspace-consumer.h25
-rw-r--r--include/linux/reiserfs_acl.h22
-rw-r--r--include/linux/reiserfs_fs.h265
-rw-r--r--include/linux/reiserfs_fs_i.h4
-rw-r--r--include/linux/reiserfs_fs_sb.h36
-rw-r--r--include/linux/reiserfs_xattr.h159
-rw-r--r--include/linux/res_counter.h2
-rw-r--r--include/linux/rfkill.h410
-rw-r--r--include/linux/ring_buffer.h106
-rw-r--r--include/linux/rmap.h12
-rw-r--r--include/linux/romfs_fs.h5
-rw-r--r--include/linux/rotary_encoder.h15
-rw-r--r--include/linux/rtc-v3020.h6
-rw-r--r--include/linux/rtc.h6
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/sched.h269
-rw-r--r--include/linux/sctp.h102
-rw-r--r--include/linux/seccomp.h2
-rw-r--r--include/linux/security.h39
-rw-r--r--include/linux/selinux_netlink.h6
-rw-r--r--include/linux/seq_file.h10
-rw-r--r--include/linux/serial.h116
-rw-r--r--include/linux/serial_core.h14
-rw-r--r--include/linux/serial_max3100.h52
-rw-r--r--include/linux/serial_reg.h1
-rw-r--r--include/linux/serial_sci.h3
-rw-r--r--include/linux/serio.h9
-rw-r--r--include/linux/sh_intc.h1
-rw-r--r--include/linux/sh_timer.h13
-rw-r--r--include/linux/sht15.h24
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/skbuff.h200
-rw-r--r--include/linux/slab.h11
-rw-r--r--include/linux/slab_def.h149
-rw-r--r--include/linux/slob_def.h14
-rw-r--r--include/linux/slow-work.h95
-rw-r--r--include/linux/slub_def.h72
-rw-r--r--include/linux/smp.h14
-rw-r--r--include/linux/smsc911x.h13
-rw-r--r--include/linux/snmp.h10
-rw-r--r--include/linux/socket.h20
-rw-r--r--include/linux/sockios.h3
-rw-r--r--include/linux/sonypi.h8
-rw-r--r--include/linux/spi/ad7879.h35
-rw-r--r--include/linux/spi/ads7846.h2
-rw-r--r--include/linux/spi/eeprom.h6
-rw-r--r--include/linux/spi/libertas_spi.h29
-rw-r--r--include/linux/spi/spi.h41
-rw-r--r--include/linux/spi/spi_gpio.h6
-rw-r--r--include/linux/spi/wl12xx.h31
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/spinlock_up.h1
-rw-r--r--include/linux/splice.h15
-rw-r--r--include/linux/ssb/ssb.h4
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h224
-rw-r--r--include/linux/ssb/ssb_regs.h36
-rw-r--r--include/linux/stackprotector.h16
-rw-r--r--include/linux/stacktrace.h3
-rw-r--r--include/linux/string.h17
-rw-r--r--include/linux/stringify.h4
-rw-r--r--include/linux/sunrpc/bc_xprt.h49
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/sunrpc/svc.h36
-rw-r--r--include/linux/sunrpc/svc_xprt.h59
-rw-r--r--include/linux/sunrpc/svcsock.h9
-rw-r--r--include/linux/sunrpc/xdr.h42
-rw-r--r--include/linux/sunrpc/xprt.h41
-rw-r--r--include/linux/suspend.h57
-rw-r--r--include/linux/suspend_ioctls.h11
-rw-r--r--include/linux/swap.h59
-rw-r--r--include/linux/swiotlb.h44
-rw-r--r--include/linux/synclink.h1
-rw-r--r--include/linux/syscalls.h75
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/tcp.h27
-rw-r--r--include/linux/thermal.h48
-rw-r--r--include/linux/thread_info.h3
-rw-r--r--include/linux/tick.h3
-rw-r--r--include/linux/time.h23
-rw-r--r--include/linux/timecompare.h125
-rw-r--r--include/linux/timer.h118
-rw-r--r--include/linux/timeriomem-rng.h21
-rw-r--r--include/linux/times.h8
-rw-r--r--include/linux/timex.h47
-rw-r--r--include/linux/topology.h17
-rw-r--r--include/linux/tpm.h35
-rw-r--r--include/linux/trace_clock.h19
-rw-r--r--include/linux/trace_seq.h94
-rw-r--r--include/linux/tracehook.h28
-rw-r--r--include/linux/tracepoint.h118
-rw-r--r--include/linux/tty.h18
-rw-r--r--include/linux/tty_driver.h12
-rw-r--r--include/linux/types.h21
-rw-r--r--include/linux/ucb1400.h23
-rw-r--r--include/linux/uio_driver.h4
-rw-r--r--include/linux/ultrasound.h2
-rw-r--r--include/linux/usb.h230
-rw-r--r--include/linux/usb/audio.h265
-rw-r--r--include/linux/usb/cdc.h3
-rw-r--r--include/linux/usb/ch9.h200
-rw-r--r--include/linux/usb/composite.h11
-rw-r--r--include/linux/usb/gadget.h6
-rw-r--r--include/linux/usb/langwell_otg.h177
-rw-r--r--include/linux/usb/langwell_udc.h310
-rw-r--r--include/linux/usb/musb.h5
-rw-r--r--include/linux/usb/otg.h8
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/usb/r8a66597.h44
-rw-r--r--include/linux/usb/rndis_host.h85
-rw-r--r--include/linux/usb/serial.h52
-rw-r--r--include/linux/usb/usbnet.h6
-rw-r--r--include/linux/usb/wusb.h3
-rw-r--r--include/linux/usb_usual.h21
-rw-r--r--include/linux/utime.h4
-rw-r--r--include/linux/utsname.h12
-rw-r--r--include/linux/video_decoder.h48
-rw-r--r--include/linux/video_encoder.h23
-rw-r--r--include/linux/videodev.h18
-rw-r--r--include/linux/videodev2.h77
-rw-r--r--include/linux/virtio.h15
-rw-r--r--include/linux/virtio_blk.h12
-rw-r--r--include/linux/virtio_config.h49
-rw-r--r--include/linux/virtio_net.h72
-rw-r--r--include/linux/virtio_pci.h10
-rw-r--r--include/linux/virtio_ring.h8
-rw-r--r--include/linux/vlynq.h161
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/linux/vmstat.h5
-rw-r--r--include/linux/w1-gpio.h1
-rw-r--r--include/linux/wait.h37
-rw-r--r--include/linux/wimax.h7
-rw-r--r--include/linux/wimax/debug.h2
-rw-r--r--include/linux/wimax/i2400m.h71
-rw-r--r--include/linux/wireless.h12
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/xfrm.h2
559 files changed, 20595 insertions, 6867 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 106c3ba5084..03f22076381 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += falloc.h
header-y += fd.h
header-y += fdreg.h
header-y += fib_rules.h
+header-y += fiemap.h
header-y += firewire-cdev.h
header-y += firewire-constants.h
header-y += fuse.h
@@ -115,6 +116,7 @@ header-y += mqueue.h
header-y += mtio.h
header-y += ncp_no.h
header-y += neighbour.h
+header-y += net_dropmon.h
header-y += netfilter_arp.h
header-y += netrom.h
header-y += nfs2.h
@@ -133,9 +135,11 @@ header-y += posix_types.h
header-y += ppdev.h
header-y += prctl.h
header-y += qnxtypes.h
+header-y += qnx4_fs.h
header-y += radeonfb.h
header-y += raw.h
header-y += resource.h
+header-y += romfs_fs.h
header-y += rose.h
header-y += serial_reg.h
header-y += smbno.h
@@ -157,8 +161,6 @@ header-y += ultrasound.h
header-y += un.h
header-y += utime.h
header-y += veth.h
-header-y += video_decoder.h
-header-y += video_encoder.h
header-y += videotext.h
header-y += x25.h
@@ -306,15 +308,15 @@ unifdef-y += pmu.h
unifdef-y += poll.h
unifdef-y += ppp_defs.h
unifdef-y += ppp-comp.h
+unifdef-y += pps.h
unifdef-y += ptrace.h
-unifdef-y += qnx4_fs.h
unifdef-y += quota.h
unifdef-y += random.h
+unifdef-y += rfkill.h
unifdef-y += irqnr.h
unifdef-y += reboot.h
unifdef-y += reiserfs_fs.h
unifdef-y += reiserfs_xattr.h
-unifdef-y += romfs_fs.h
unifdef-y += route.h
unifdef-y += rtc.h
unifdef-y += rtnetlink.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6fce2fc2d12..51b4b0a5ce8 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);
@@ -96,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following four functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
+void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
@@ -109,6 +111,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
void acpi_irq_stats_init(void);
extern u32 acpi_irq_handled;
+extern u32 acpi_irq_not_handled;
extern struct acpi_mcfg_allocation *pci_mmcfg_config;
extern int pci_mmcfg_config_num;
@@ -116,7 +119,7 @@ extern int pci_mmcfg_config_num;
extern int sbf_port;
extern unsigned long acpi_realmode_flags;
-int acpi_register_gsi (u32 gsi, int triggering, int polarity);
+int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
#ifdef CONFIG_X86_IO_APIC
@@ -256,6 +259,40 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
#endif /* CONFIG_PM_SLEEP */
+
+#define OSC_QUERY_TYPE 0
+#define OSC_SUPPORT_TYPE 1
+#define OSC_CONTROL_TYPE 2
+#define OSC_SUPPORT_MASKS 0x1f
+
+/* _OSC DW0 Definition */
+#define OSC_QUERY_ENABLE 1
+#define OSC_REQUEST_ERROR 2
+#define OSC_INVALID_UUID_ERROR 4
+#define OSC_INVALID_REVISION_ERROR 8
+#define OSC_CAPABILITIES_MASK_ERROR 16
+
+/* _OSC DW1 Definition (OS Support Fields) */
+#define OSC_EXT_PCI_CONFIG_SUPPORT 1
+#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
+#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
+#define OSC_MSI_SUPPORT 16
+
+/* _OSC DW1 Definition (OS Control Fields) */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
+#define OSC_SHPC_NATIVE_HP_CONTROL 2
+#define OSC_PCI_EXPRESS_PME_CONTROL 4
+#define OSC_PCI_EXPRESS_AER_CONTROL 8
+#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
+
+#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
+ OSC_SHPC_NATIVE_HP_CONTROL | \
+ OSC_PCI_EXPRESS_PME_CONTROL | \
+ OSC_PCI_EXPRESS_AER_CONTROL | \
+ OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
+
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
#else /* CONFIG_ACPI */
static inline int early_acpi_boot_init(void)
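As an aside, a minimal sketch (not part of the patch) of how a PCI Express port driver might use the new _OSC masks to request native hotplug and PME control. Only acpi_pci_osc_control_set() and the OSC_* flags come from the header above; the wrapper function, its name and the error handling are hypothetical.

/* Hypothetical caller; "handle" would be the host bridge's ACPI handle. */
static int request_native_pcie_services(acpi_handle handle)
{
	u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
		    OSC_PCI_EXPRESS_PME_CONTROL;
	acpi_status status;

	/* Ask the firmware to grant the OS the requested control bits. */
	status = acpi_pci_osc_control_set(handle, flags);
	if (ACPI_FAILURE(status))
		return -ENODEV;	/* firmware kept control */
	return 0;
}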
diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h
index ef788c2085a..b19801f7389 100644
--- a/include/linux/adfs_fs.h
+++ b/include/linux/adfs_fs.h
@@ -41,8 +41,6 @@ struct adfs_discrecord {
#define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3)
#ifdef __KERNEL__
-#include <linux/adfs_fs_i.h>
-#include <linux/adfs_fs_sb.h>
/*
* Calculate the boot block checksum on an ADFS drive. Note that this will
* appear to be correct if the sector contains all zeros, so also check that
@@ -60,17 +58,6 @@ static inline int adfs_checkbblk(unsigned char *ptr)
return (result & 0xff) != ptr[511];
}
-
-static inline struct adfs_sb_info *ADFS_SB(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-static inline struct adfs_inode_info *ADFS_I(struct inode *inode)
-{
- return container_of(inode, struct adfs_inode_info, vfs_inode);
-}
-
#endif
#endif
diff --git a/include/linux/adfs_fs_i.h b/include/linux/adfs_fs_i.h
deleted file mode 100644
index cb543034e54..00000000000
--- a/include/linux/adfs_fs_i.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * linux/include/linux/adfs_fs_i.h
- *
- * Copyright (C) 1997 Russell King
- */
-
-#ifndef _ADFS_FS_I
-#define _ADFS_FS_I
-
-/*
- * adfs file system inode data in memory
- */
-struct adfs_inode_info {
- loff_t mmu_private;
- unsigned long parent_id; /* object id of parent */
- __u32 loadaddr; /* RISC OS load address */
- __u32 execaddr; /* RISC OS exec address */
- unsigned int filetype; /* RISC OS file type */
- unsigned int attr; /* RISC OS permissions */
- unsigned int stamped:1; /* RISC OS file has date/time */
- struct inode vfs_inode;
-};
-
-#endif
diff --git a/include/linux/adfs_fs_sb.h b/include/linux/adfs_fs_sb.h
deleted file mode 100644
index d9bf05c02cc..00000000000
--- a/include/linux/adfs_fs_sb.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * linux/include/linux/adfs_fs_sb.h
- *
- * Copyright (C) 1997-1999 Russell King
- */
-
-#ifndef _ADFS_FS_SB
-#define _ADFS_FS_SB
-
-/*
- * Forward-declare this
- */
-struct adfs_discmap;
-struct adfs_dir_ops;
-
-/*
- * ADFS file system superblock data in memory
- */
-struct adfs_sb_info {
- struct adfs_discmap *s_map; /* bh list containing map */
- struct adfs_dir_ops *s_dir; /* directory operations */
-
- uid_t s_uid; /* owner uid */
- gid_t s_gid; /* owner gid */
- umode_t s_owner_mask; /* ADFS owner perm -> unix perm */
- umode_t s_other_mask; /* ADFS other perm -> unix perm */
-
- __u32 s_ids_per_zone; /* max. no ids in one zone */
- __u32 s_idlen; /* length of ID in map */
- __u32 s_map_size; /* sector size of a map */
- unsigned long s_size; /* total size (in blocks) of this fs */
- signed int s_map2blk; /* shift left by this for map->sector */
- unsigned int s_log2sharesize;/* log2 share size */
- __le32 s_version; /* disc format version */
- unsigned int s_namelen; /* maximum number of characters in name */
-};
-
-#endif
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 2b8df8b420f..76fa794fdac 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -70,7 +70,7 @@ struct agp_memory {
struct agp_memory *next;
struct agp_memory *prev;
struct agp_bridge_data *bridge;
- unsigned long *memory;
+ struct page **pages;
size_t page_count;
int key;
int num_scratch_pages;
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
index 110c600c885..f6778eceb8f 100644
--- a/include/linux/agpgart.h
+++ b/include/linux/agpgart.h
@@ -77,20 +77,20 @@ typedef struct _agp_setup {
* The "prot" down below needs still a "sleep" flag somehow ...
*/
typedef struct _agp_segment {
- off_t pg_start; /* starting page to populate */
- size_t pg_count; /* number of pages */
- int prot; /* prot flags for mmap */
+ __kernel_off_t pg_start; /* starting page to populate */
+ __kernel_size_t pg_count; /* number of pages */
+ int prot; /* prot flags for mmap */
} agp_segment;
typedef struct _agp_region {
- pid_t pid; /* pid of process */
- size_t seg_count; /* number of segments */
+ __kernel_pid_t pid; /* pid of process */
+ __kernel_size_t seg_count; /* number of segments */
struct _agp_segment *seg_list;
} agp_region;
typedef struct _agp_allocate {
int key; /* tag of allocation */
- size_t pg_count; /* number of pages */
+ __kernel_size_t pg_count;/* number of pages */
__u32 type; /* 0 == normal, other devspec */
__u32 physical; /* device specific (some devices
* need a phys address of the
@@ -100,7 +100,7 @@ typedef struct _agp_allocate {
typedef struct _agp_bind {
int key; /* tag of allocation */
- off_t pg_start; /* starting page to populate */
+ __kernel_off_t pg_start;/* starting page to populate */
} agp_bind;
typedef struct _agp_unbind {
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 51e6e54b2aa..9b93cafa82a 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -28,7 +28,7 @@ struct amba_id {
struct amba_driver {
struct device_driver drv;
- int (*probe)(struct amba_device *, void *);
+ int (*probe)(struct amba_device *, struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
int (*suspend)(struct amba_device *, pm_message_t);
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
new file mode 100644
index 00000000000..dcad0ffd175
--- /dev/null
+++ b/include/linux/amba/pl022.h
@@ -0,0 +1,264 @@
+/*
+ * include/linux/amba/pl022.h
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * Initial version inspired by:
+ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ * Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SSP_PL022_H
+#define _SSP_PL022_H
+
+#include <linux/device.h>
+
+/**
+ * whether SSP is in loopback mode or not
+ */
+enum ssp_loopback {
+ LOOPBACK_DISABLED,
+ LOOPBACK_ENABLED
+};
+
+/**
+ * enum ssp_interface - interfaces allowed for this SSP Controller
+ * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface
+ * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial
+ * interface
+ * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire
+ * interface
+ * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810
+ * &STn8815 only)
+ */
+enum ssp_interface {
+ SSP_INTERFACE_MOTOROLA_SPI,
+ SSP_INTERFACE_TI_SYNC_SERIAL,
+ SSP_INTERFACE_NATIONAL_MICROWIRE,
+ SSP_INTERFACE_UNIDIRECTIONAL
+};
+
+/**
+ * enum ssp_hierarchy - whether SSP is configured as Master or Slave
+ */
+enum ssp_hierarchy {
+ SSP_MASTER,
+ SSP_SLAVE
+};
+
+/**
+ * enum ssp_clock_params - clock parameters, to set SSP clock at a
+ * desired freq
+ */
+struct ssp_clock_params {
+ u8 cpsdvsr; /* value from 2 to 254 (even only!) */
+ u8 scr; /* value from 0 to 255 */
+};
+
+/**
+ * enum ssp_rx_endian - endianess of Rx FIFO Data
+ */
+enum ssp_rx_endian {
+ SSP_RX_MSB,
+ SSP_RX_LSB
+};
+
+/**
+ * enum ssp_tx_endian - endianess of Tx FIFO Data
+ */
+enum ssp_tx_endian {
+ SSP_TX_MSB,
+ SSP_TX_LSB
+};
+
+/**
+ * enum ssp_data_size - number of bits in one data element
+ */
+enum ssp_data_size {
+ SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6,
+ SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9,
+ SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12,
+ SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15,
+ SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18,
+ SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21,
+ SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24,
+ SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27,
+ SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30,
+ SSP_DATA_BITS_31, SSP_DATA_BITS_32
+};
+
+/**
+ * enum ssp_mode - SSP mode of operation (Communication modes)
+ */
+enum ssp_mode {
+ INTERRUPT_TRANSFER,
+ POLLING_TRANSFER,
+ DMA_TRANSFER
+};
+
+/**
+ * enum ssp_rx_level_trig - receive FIFO watermark level which triggers
+ * IT: Interrupt fires when _N_ or more elements in RX FIFO.
+ */
+enum ssp_rx_level_trig {
+ SSP_RX_1_OR_MORE_ELEM,
+ SSP_RX_4_OR_MORE_ELEM,
+ SSP_RX_8_OR_MORE_ELEM,
+ SSP_RX_16_OR_MORE_ELEM,
+ SSP_RX_32_OR_MORE_ELEM
+};
+
+/**
+ * Transmit FIFO watermark level which triggers (IT Interrupt fires
+ * when _N_ or more empty locations in TX FIFO)
+ */
+enum ssp_tx_level_trig {
+ SSP_TX_1_OR_MORE_EMPTY_LOC,
+ SSP_TX_4_OR_MORE_EMPTY_LOC,
+ SSP_TX_8_OR_MORE_EMPTY_LOC,
+ SSP_TX_16_OR_MORE_EMPTY_LOC,
+ SSP_TX_32_OR_MORE_EMPTY_LOC
+};
+
+/**
+ * enum SPI Clock Phase - clock phase (Motorola SPI interface only)
+ * @SSP_CLK_RISING_EDGE: Receive data on rising edge
+ * @SSP_CLK_FALLING_EDGE: Receive data on falling edge
+ */
+enum ssp_spi_clk_phase {
+ SSP_CLK_RISING_EDGE,
+ SSP_CLK_FALLING_EDGE
+};
+
+/**
+ * enum SPI Clock Polarity - clock polarity (Motorola SPI interface only)
+ * @SSP_CLK_POL_IDLE_LOW: Low inactive level
+ * @SSP_CLK_POL_IDLE_HIGH: High inactive level
+ */
+enum ssp_spi_clk_pol {
+ SSP_CLK_POL_IDLE_LOW,
+ SSP_CLK_POL_IDLE_HIGH
+};
+
+/**
+ * Microwire Conrol Lengths Command size in microwire format
+ */
+enum ssp_microwire_ctrl_len {
+ SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6,
+ SSP_BITS_7, SSP_BITS_8, SSP_BITS_9,
+ SSP_BITS_10, SSP_BITS_11, SSP_BITS_12,
+ SSP_BITS_13, SSP_BITS_14, SSP_BITS_15,
+ SSP_BITS_16, SSP_BITS_17, SSP_BITS_18,
+ SSP_BITS_19, SSP_BITS_20, SSP_BITS_21,
+ SSP_BITS_22, SSP_BITS_23, SSP_BITS_24,
+ SSP_BITS_25, SSP_BITS_26, SSP_BITS_27,
+ SSP_BITS_28, SSP_BITS_29, SSP_BITS_30,
+ SSP_BITS_31, SSP_BITS_32
+};
+
+/**
+ * enum Microwire Wait State
+ * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit
+ * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit
+ */
+enum ssp_microwire_wait_state {
+ SSP_MWIRE_WAIT_ZERO,
+ SSP_MWIRE_WAIT_ONE
+};
+
+/**
+ * enum Microwire - whether Full/Half Duplex
+ * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional,
+ * SSPRXD not used
+ * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is
+ * an input.
+ */
+enum ssp_duplex {
+ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX
+};
+
+/**
+ * CHIP select/deselect commands
+ */
+enum ssp_chip_select {
+ SSP_CHIP_SELECT,
+ SSP_CHIP_DESELECT
+};
+
+
+/**
+ * struct pl022_ssp_master - device.platform_data for SPI controller devices.
+ * @num_chipselect: chipselects are used to distinguish individual
+ * SPI slaves, and are numbered from zero to num_chipselects - 1.
+ * each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
+ * @enable_dma: if true enables DMA driven transfers.
+ */
+struct pl022_ssp_controller {
+ u16 bus_id;
+ u8 num_chipselect;
+ u8 enable_dma:1;
+};
+
+/**
+ * struct ssp_config_chip - spi_board_info.controller_data for SPI
+ * slave devices, copied to spi_device.controller_data.
+ *
+ * @lbm: used for test purpose to internally connect RX and TX
+ * @iface: Interface type(Motorola, TI, Microwire, Universal)
+ * @hierarchy: sets whether interface is master or slave
+ * @slave_tx_disable: SSPTXD is disconnected (in slave mode only)
+ * @clk_freq: Tune freq parameters of SSP(when in master mode)
+ * @endian_rx: Endianess of Data in Rx FIFO
+ * @endian_tx: Endianess of Data in Tx FIFO
+ * @data_size: Width of data element(4 to 32 bits)
+ * @com_mode: communication mode: polling, Interrupt or DMA
+ * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode)
+ * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode)
+ * @clk_phase: Motorola SPI interface Clock phase
+ * @clk_pol: Motorola SPI interface Clock polarity
+ * @ctrl_len: Microwire interface: Control length
+ * @wait_state: Microwire interface: Wait state
+ * @duplex: Microwire interface: Full/Half duplex
+ * @cs_control: function pointer to board-specific function to
+ * assert/deassert I/O port to control HW generation of devices chip-select.
+ * @dma_xfer_type: Type of DMA xfer (Mem-to-periph or Periph-to-Periph)
+ * @dma_config: DMA configuration for SSP controller and peripheral
+ */
+struct pl022_config_chip {
+ struct device *dev;
+ enum ssp_loopback lbm;
+ enum ssp_interface iface;
+ enum ssp_hierarchy hierarchy;
+ bool slave_tx_disable;
+ struct ssp_clock_params clk_freq;
+ enum ssp_rx_endian endian_rx;
+ enum ssp_tx_endian endian_tx;
+ enum ssp_data_size data_size;
+ enum ssp_mode com_mode;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
+ enum ssp_spi_clk_phase clk_phase;
+ enum ssp_spi_clk_pol clk_pol;
+ enum ssp_microwire_ctrl_len ctrl_len;
+ enum ssp_microwire_wait_state wait_state;
+ enum ssp_duplex duplex;
+ void (*cs_control) (u32 control);
+};
+
+#endif /* _SSP_PL022_H */
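For illustration only, board code might populate the two new PL022 structures roughly as below. The bus number, FIFO watermark levels and the chip-select helper are made-up examples, not values taken from the patch; only the structure and enum names come from the header above.

/* Hypothetical board-level chip-select helper (e.g. toggling a GPIO). */
static void board_cs_control(u32 control)
{
	/* drive the CS line according to SSP_CHIP_SELECT / SSP_CHIP_DESELECT */
}

static struct pl022_ssp_controller ssp0_plat_data = {
	.bus_id		= 0,
	.num_chipselect	= 1,
	.enable_dma	= 0,
};

static struct pl022_config_chip board_spi_chip_info = {
	.lbm		= LOOPBACK_DISABLED,
	.iface		= SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy	= SSP_MASTER,
	.clk_freq	= { .cpsdvsr = 2, .scr = 8 },
	.endian_rx	= SSP_RX_MSB,
	.endian_tx	= SSP_TX_MSB,
	.data_size	= SSP_DATA_BITS_8,
	.com_mode	= INTERRUPT_TRANSFER,
	.rx_lev_trig	= SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig	= SSP_TX_1_OR_MORE_EMPTY_LOC,
	.clk_phase	= SSP_CLK_FALLING_EDGE,
	.clk_pol	= SSP_CLK_POL_IDLE_LOW,
	.cs_control	= board_cs_control,
};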
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
new file mode 100644
index 00000000000..b4fbd986260
--- /dev/null
+++ b/include/linux/amba/pl061.h
@@ -0,0 +1,15 @@
+/* platform data for the PL061 GPIO driver */
+
+struct pl061_platform_data {
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+ /* number of the first IRQ.
+ * If the IRQ functionality in not desired this must be set to
+ * (unsigned) -1.
+ */
+ unsigned irq_base;
+
+ u8 directions; /* startup directions, 1: out, 0: in */
+ u8 values; /* startup values */
+};
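Likewise, a made-up instance of the new PL061 platform data, assuming a bank whose first GPIO is numbered 32 and whose interrupt outputs are not used (the header's own convention of irq_base = (unsigned) -1):

static struct pl061_platform_data board_pl061_data = {
	.gpio_base	= 32,			/* this block provides GPIOs 32..39 */
	.irq_base	= (unsigned) -1,	/* GPIO interrupts not wired up */
	.directions	= 0x0f,			/* lower four pins start as outputs */
	.values		= 0x01,			/* drive pin 0 high at startup */
};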
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 48ee32a18ac..5a5a7fd6249 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -114,6 +114,9 @@
#define UART011_IFLS_TX4_8 (2 << 0)
#define UART011_IFLS_TX6_8 (3 << 0)
#define UART011_IFLS_TX7_8 (4 << 0)
+/* special values for ST vendor with deeper fifo */
+#define UART011_IFLS_RX_HALF (5 << 3)
+#define UART011_IFLS_TX_HALF (5 << 0)
#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
#define UART011_BEIM (1 << 9) /* break error interrupt mask */
@@ -159,6 +162,7 @@
#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
#ifndef __ASSEMBLY__
+struct amba_device; /* in uncompress this is included but amba/bus.h is not */
struct amba_pl010_data {
void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl);
};
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
index a1916078fd0..cd4bcb6989c 100644
--- a/include/linux/arcdevice.h
+++ b/include/linux/arcdevice.h
@@ -235,8 +235,6 @@ struct Outgoing {
struct arcnet_local {
- struct net_device_stats stats;
-
uint8_t config, /* current value of CONFIG register */
timeout, /* Extended timeout for COM20020 */
backplane, /* Backplane flag for COM20020 */
@@ -335,7 +333,12 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
-struct net_device *alloc_arcdev(char *name);
+struct net_device *alloc_arcdev(const char *name);
+
+int arcnet_open(struct net_device *dev);
+int arcnet_close(struct net_device *dev);
+int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
+void arcnet_timeout(struct net_device *dev);
#endif /* __KERNEL__ */
#endif /* _LINUX_ARCDEVICE_H */
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821b..5fc2ef8d97f 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
/**
* dma_chan_ref - object used to manage dma channels received from the
* dmaengine core.
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 9a061accd8b..9c75921f0c1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -29,6 +29,8 @@
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
+#include <linux/kernel.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -91,6 +93,7 @@ enum {
ATA_ID_CFA_POWER = 160,
ATA_ID_CFA_KEY_MGMT = 162,
ATA_ID_CFA_MODES = 163,
+ ATA_ID_DATA_SET_MGMT = 169,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
@@ -108,6 +111,8 @@ enum {
ATA_PIO5 = ATA_PIO4 | (1 << 5),
ATA_PIO6 = ATA_PIO5 | (1 << 6),
+ ATA_PIO4_ONLY = (1 << 4),
+
ATA_SWDMA0 = (1 << 0),
ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1),
ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2),
@@ -117,6 +122,8 @@ enum {
ATA_MWDMA0 = (1 << 0),
ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1),
ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2),
+ ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3),
+ ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4),
ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2),
ATA_MWDMA2_ONLY = (1 << 2),
@@ -131,6 +138,8 @@ enum {
ATA_UDMA7 = ATA_UDMA6 | (1 << 7),
/* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */
+ ATA_UDMA24_ONLY = (1 << 2) | (1 << 4),
+
ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */
/* DMA-related */
@@ -242,10 +251,9 @@ enum {
ATA_CMD_SMART = 0xB0,
ATA_CMD_MEDIA_LOCK = 0xDE,
ATA_CMD_MEDIA_UNLOCK = 0xDF,
+ ATA_CMD_DSM = 0x06,
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
- /* EXABYTE specific */
- ATA_EXABYTE_ENABLE_NEST = 0xF0,
/* READ_LOG_EXT pages */
ATA_LOG_SATA_NCQ = 0x10,
@@ -317,6 +325,9 @@ enum {
ATA_SMART_READ_VALUES = 0xD0,
ATA_SMART_READ_THRESHOLDS = 0xD1,
+ /* feature values for Data Set Management */
+ ATA_DSM_TRIM = 0x01,
+
/* password used in LBA Mid / LBA High for executing SMART commands */
ATA_SMART_LBAM_PASS = 0x4F,
ATA_SMART_LBAH_PASS = 0xC2,
@@ -719,6 +730,42 @@ static inline int ata_id_has_unload(const u16 *id)
return 0;
}
+static inline int ata_id_form_factor(const u16 *id)
+{
+ u16 val = id[168];
+
+ if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff)
+ return 0;
+
+ val &= 0xf;
+
+ if (val > 5)
+ return 0;
+
+ return val;
+}
+
+static inline int ata_id_rotation_rate(const u16 *id)
+{
+ u16 val = id[217];
+
+ if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff)
+ return 0;
+
+ if (val > 1 && val < 0x401)
+ return 0;
+
+ return val;
+}
+
+static inline int ata_id_has_trim(const u16 *id)
+{
+ if (ata_id_major_version(id) >= 7 &&
+ (id[ATA_ID_DATA_SET_MGMT] & 1))
+ return 1;
+ return 0;
+}
+
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -753,6 +800,20 @@ static inline int ata_id_is_ssd(const u16 *id)
return id[ATA_ID_ROT_SPEED] == 0x01;
}
+static inline int ata_id_pio_need_iordy(const u16 *id, const u8 pio)
+{
+ /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */
+ if (pio > 4 && ata_id_is_cfa(id))
+ return 0;
+ /* For PIO3 and higher it is mandatory. */
+ if (pio > 2)
+ return 1;
+ /* Turn it on when possible. */
+ if (ata_id_has_iordy(id))
+ return 1;
+ return 0;
+}
+
static inline int ata_drive_40wire(const u16 *dev_id)
{
if (ata_id_is_sata(dev_id))
@@ -859,6 +920,32 @@ static inline void ata_id_to_hd_driveid(u16 *id)
#endif
}
+/*
+ * Write up to 'max' LBA Range Entries to the buffer that will cover the
+ * extent from sector to sector + count. This is used for TRIM and for
+ * ADD LBA(S) TO NV CACHE PINNED SET.
+ */
+static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max,
+ u64 sector, unsigned long count)
+{
+ __le64 *buffer = _buffer;
+ unsigned i = 0;
+
+ while (i < max) {
+ u64 entry = sector |
+ ((u64)(count > 0xffff ? 0xffff : count) << 48);
+ buffer[i++] = __cpu_to_le64(entry);
+ if (count <= 0xffff)
+ break;
+ count -= 0xffff;
+ sector += 0xffff;
+ }
+
+ max = ALIGN(i * 8, 512);
+ memset(buffer + i, 0, max - i * 8);
+ return max;
+}
+
static inline int is_multi_taskfile(struct ata_taskfile *tf)
{
return (tf->command == ATA_CMD_READ_MULTI) ||
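Purely as a sketch of the new TRIM support: the helper and the ATA_CMD_DSM/ATA_DSM_TRIM constants are from the patch, while the wrapper function and the LBA/count values below are invented for illustration.

/* Hypothetical helper building a single 512-byte TRIM payload. */
static unsigned int build_trim_payload(u8 *buf)
{
	unsigned int payload_bytes;

	/* 512 bytes hold up to 64 eight-byte LBA range entries.
	 * Mark 2048 sectors starting at LBA 4096 as unused. */
	payload_bytes = ata_set_lba_range_entries(buf, 512 / 8, 4096, 2048);

	/* The result is padded to a 512-byte multiple and would be sent
	 * with ATA_CMD_DSM using the ATA_DSM_TRIM feature. */
	return payload_bytes;
}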
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
new file mode 100644
index 00000000000..b847fc7b93f
--- /dev/null
+++ b/include/linux/ath9k_platform.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_ATH9K_PLATFORM_H
+#define _LINUX_ATH9K_PLATFORM_H
+
+#define ATH9K_PLAT_EEP_MAX_WORDS 2048
+
+struct ath9k_platform_data {
+ u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
+};
+
+#endif /* _LINUX_ATH9K_PLATFORM_H */
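And a trivial, hypothetical use of the new ath9k platform data, where a board file embeds a calibration/EEPROM image for the wireless MAC; the array contents are of course board specific.

static struct ath9k_platform_data board_wmac_data = {
	/* EEPROM image, normally copied from flash during board setup. */
	.eeprom_data = { 0xa55a, /* ... up to ATH9K_PLAT_EEP_MAX_WORDS ... */ },
};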
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 2f1f95737ac..57b1846a3c8 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -10,6 +10,7 @@
* @bus_width: Number of data lines wired up the slot
* @detect_pin: GPIO pin wired to the card detect switch
* @wp_pin: GPIO pin wired to the write protect sensor
+ * @detect_is_active_high: The state of the detect pin when it is active
*
* If a given slot is not present on the board, @bus_width should be
* set to 0. The other fields are ignored in this case.
@@ -24,6 +25,7 @@ struct mci_slot_pdata {
unsigned int bus_width;
int detect_pin;
int wp_pin;
+ bool detect_is_active_high;
};
/**
diff --git a/include/linux/atmlec.h b/include/linux/atmlec.h
index 6f5a1bab8f5..39c917fd1b9 100644
--- a/include/linux/atmlec.h
+++ b/include/linux/atmlec.h
@@ -11,6 +11,7 @@
#include <linux/atmioc.h>
#include <linux/atm.h>
#include <linux/if_ether.h>
+#include <linux/types.h>
/* ATM lec daemon control socket */
#define ATMLEC_CTRL _IO('a', ATMIOC_LANE)
@@ -78,8 +79,8 @@ struct atmlec_msg {
} normal;
struct atmlec_config_msg config;
struct {
- uint16_t lec_id; /* requestor lec_id */
- uint32_t tran_id; /* transaction id */
+ __u16 lec_id; /* requestor lec_id */
+ __u32 tran_id; /* transaction id */
unsigned char mac_addr[ETH_ALEN]; /* dst mac addr */
unsigned char atm_addr[ATM_ESA_LEN]; /* reqestor ATM addr */
} proxy; /*
diff --git a/include/linux/atmmpc.h b/include/linux/atmmpc.h
index ea1650425a1..2aba5787fa6 100644
--- a/include/linux/atmmpc.h
+++ b/include/linux/atmmpc.h
@@ -4,6 +4,7 @@
#include <linux/atmapi.h>
#include <linux/atmioc.h>
#include <linux/atm.h>
+#include <linux/types.h>
#define ATMMPC_CTRL _IO('a', ATMIOC_MPOA)
#define ATMMPC_DATA _IO('a', ATMIOC_MPOA+1)
@@ -18,39 +19,39 @@ struct atmmpc_ioc {
};
typedef struct in_ctrl_info {
- uint8_t Last_NHRP_CIE_code;
- uint8_t Last_Q2931_cause_value;
- uint8_t eg_MPC_ATM_addr[ATM_ESA_LEN];
+ __u8 Last_NHRP_CIE_code;
+ __u8 Last_Q2931_cause_value;
+ __u8 eg_MPC_ATM_addr[ATM_ESA_LEN];
__be32 tag;
__be32 in_dst_ip; /* IP address this ingress MPC sends packets to */
- uint16_t holding_time;
- uint32_t request_id;
+ __u16 holding_time;
+ __u32 request_id;
} in_ctrl_info;
typedef struct eg_ctrl_info {
- uint8_t DLL_header[256];
- uint8_t DH_length;
+ __u8 DLL_header[256];
+ __u8 DH_length;
__be32 cache_id;
__be32 tag;
__be32 mps_ip;
__be32 eg_dst_ip; /* IP address to which ingress MPC sends packets */
- uint8_t in_MPC_data_ATM_addr[ATM_ESA_LEN];
- uint16_t holding_time;
+ __u8 in_MPC_data_ATM_addr[ATM_ESA_LEN];
+ __u16 holding_time;
} eg_ctrl_info;
struct mpc_parameters {
- uint16_t mpc_p1; /* Shortcut-Setup Frame Count */
- uint16_t mpc_p2; /* Shortcut-Setup Frame Time */
- uint8_t mpc_p3[8]; /* Flow-detection Protocols */
- uint16_t mpc_p4; /* MPC Initial Retry Time */
- uint16_t mpc_p5; /* MPC Retry Time Maximum */
- uint16_t mpc_p6; /* Hold Down Time */
+ __u16 mpc_p1; /* Shortcut-Setup Frame Count */
+ __u16 mpc_p2; /* Shortcut-Setup Frame Time */
+ __u8 mpc_p3[8]; /* Flow-detection Protocols */
+ __u16 mpc_p4; /* MPC Initial Retry Time */
+ __u16 mpc_p5; /* MPC Retry Time Maximum */
+ __u16 mpc_p6; /* Hold Down Time */
} ;
struct k_message {
- uint16_t type;
+ __u16 type;
__be32 ip_mask;
- uint8_t MPS_ctrl[ATM_ESA_LEN];
+ __u8 MPS_ctrl[ATM_ESA_LEN];
union {
in_ctrl_info in_info;
eg_ctrl_info eg_info;
@@ -61,11 +62,11 @@ struct k_message {
struct llc_snap_hdr {
/* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */
- uint8_t dsap; /* Destination Service Access Point (0xAA) */
- uint8_t ssap; /* Source Service Access Point (0xAA) */
- uint8_t ui; /* Unnumbered Information (0x03) */
- uint8_t org[3]; /* Organizational identification (0x000000) */
- uint8_t type[2]; /* Ether type (for IP) (0x0800) */
+ __u8 dsap; /* Destination Service Access Point (0xAA) */
+ __u8 ssap; /* Source Service Access Point (0xAA) */
+ __u8 ui; /* Unnumbered Information (0x03) */
+ __u8 org[3]; /* Organizational identification (0x000000) */
+ __u8 type[2]; /* Ether type (for IP) (0x0800) */
};
/* TLVs this MPC recognizes */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 67e5dbfc296..4fa2810b675 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -36,7 +36,8 @@
* 1500 - 1599 kernel LSPP events
* 1600 - 1699 kernel crypto events
* 1700 - 1799 kernel anomaly records
- * 1800 - 1999 future kernel use (maybe integrity labels and related events)
+ * 1800 - 1899 kernel integrity events
+ * 1900 - 1999 future kernel use
* 2000 is for otherwise unclassified kernel audit messages (legacy)
* 2001 - 2099 unused (kernel)
* 2100 - 2199 user space anomaly records
@@ -125,6 +126,12 @@
#define AUDIT_LAST_KERN_ANOM_MSG 1799
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
+#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
+#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
+#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
+#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
+#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
+#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 91a773993a5..850f39b33e7 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -10,8 +10,13 @@
#ifndef _LINUX_AUTO_DEV_IOCTL_H
#define _LINUX_AUTO_DEV_IOCTL_H
+#include <linux/auto_fs.h>
+
+#ifdef __KERNEL__
#include <linux/string.h>
-#include <linux/types.h>
+#else
+#include <string.h>
+#endif /* __KERNEL__ */
#define AUTOFS_DEVICE_NAME "autofs"
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index c21e5972a3e..7b09c8348fd 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -14,13 +14,14 @@
#ifndef _LINUX_AUTO_FS_H
#define _LINUX_AUTO_FS_H
+#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/fs.h>
#include <linux/limits.h>
-#include <asm/types.h>
-#endif /* __KERNEL__ */
-
#include <linux/ioctl.h>
+#else
+#include <sys/ioctl.h>
+#endif /* __KERNEL__ */
/* This file describes autofs v3 */
#define AUTOFS_PROTO_VERSION 3
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4..0ec2c594868 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
*/
enum bdi_state {
BDI_pdflush, /* A pdflush thread is working this device */
- BDI_write_congested, /* The write queue is getting full */
- BDI_read_congested, /* The read queue is getting full */
+ BDI_async_congested, /* The async (write) queue is getting full */
+ BDI_sync_congested, /* The sync queue is getting full */
BDI_unused, /* Available bits start here */
};
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
- return bdi_congested(bdi, 1 << BDI_read_congested);
+ return bdi_congested(bdi, 1 << BDI_sync_congested);
}
static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
- return bdi_congested(bdi, 1 << BDI_write_congested);
+ return bdi_congested(bdi, 1 << BDI_async_congested);
}
static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
- return bdi_congested(bdi, (1 << BDI_read_congested)|
- (1 << BDI_write_congested));
+ return bdi_congested(bdi, (1 << BDI_sync_congested) |
+ (1 << BDI_async_congested));
}
void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 77b4a9e4600..61ee18c1bdb 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,8 +35,7 @@ struct linux_binprm{
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
- unsigned int sh_bang:1,
- misc_bang:1,
+ unsigned int
cred_prepared:1,/* true if creds already prepared (multiple
* preps happen for interpreters) */
cap_effective:1;/* true if has elevated effective capabilities,
@@ -83,7 +82,19 @@ struct linux_binfmt {
int hasvdso;
};
-extern int register_binfmt(struct linux_binfmt *);
+extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
+
+/* Registration of default binfmt handlers */
+static inline int register_binfmt(struct linux_binfmt *fmt)
+{
+ return __register_binfmt(fmt, 0);
+}
+/* Same as above, but adds a new binfmt at the top of the list */
+static inline int insert_binfmt(struct linux_binfmt *fmt)
+{
+ return __register_binfmt(fmt, 1);
+}
+
extern void unregister_binfmt(struct linux_binfmt *);
extern int prepare_binprm(struct linux_binprm *);
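To show the intent of the register_binfmt()/insert_binfmt() split, here is a hypothetical format handler that needs to be tried ahead of the built-in handlers. The .module and .load_binary fields are the usual linux_binfmt members (not visible in this hunk), and my_load_binary is a stand-in name.

static int my_load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	return -ENOEXEC;	/* stub: decline everything */
}

static struct linux_binfmt my_format = {
	.module		= THIS_MODULE,
	.load_binary	= my_load_binary,
};

static int __init my_binfmt_init(void)
{
	/* Put this handler at the head of the list so it is tried first. */
	return insert_binfmt(&my_format);
}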
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d8bd43bfdcf..2a04eb54c0d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -132,6 +132,7 @@ struct bio {
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
+#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
@@ -145,20 +146,21 @@ struct bio {
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
- * Note that this does NOT indicate that the IO itself is sync, just
- * that the block layer will not postpone issue of this IO by plugging.
- * bit 4 -- metadata request
+ * bit 3 -- synchronous I/O hint.
+ * bit 4 -- Unplug the device immediately after submitting this bio.
+ * bit 5 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
- * bit 5 -- discard sectors
+ * bit 6 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
- * bit 6 -- fail fast device errors
- * bit 7 -- fail fast transport errors
- * bit 8 -- fail fast driver errors
+ * bit 7 -- fail fast device errors
+ * bit 8 -- fail fast transport errors
+ * bit 9 -- fail fast driver errors
* Don't want driver retries for any fast fail whatever the reason.
+ * bit 10 -- Tell the IO scheduler not to wait for more requests after this
+ * one has been submitted, even if it is a SYNC request.
*/
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
@@ -170,6 +172,7 @@ struct bio {
#define BIO_RW_FAILFAST_DEV 7
#define BIO_RW_FAILFAST_TRANSPORT 8
#define BIO_RW_FAILFAST_DRIVER 9
+#define BIO_RW_NOIDLE 10
#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
@@ -188,6 +191,7 @@ struct bio {
#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
+#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE)
/*
* upper 16 bits of bi_rw define the io priority of this bio
@@ -214,12 +218,12 @@ struct bio {
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
{
if (bio->bi_vcnt)
- return bio_iovec(bio)->bv_len >> 9;
+ return bio_iovec(bio)->bv_len;
else /* dataless requests such as discard */
- return bio->bi_size >> 9;
+ return bio->bi_size;
}
static inline void *bio_data(struct bio *bio)
@@ -275,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
@@ -426,9 +430,6 @@ struct bio_set {
unsigned int front_pad;
mempool_t *bio_pool;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t *bio_integrity_pool;
-#endif
mempool_t *bvec_pool;
};
@@ -504,6 +505,120 @@ static inline int bio_has_data(struct bio *bio)
return bio && bio->bi_io_vec != NULL;
}
+/*
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
+ *
+ * A bio_list anchors a singly-linked list of bios chained through the bi_next
+ * member of the bio. The bio_list also caches the last list member to allow
+ * fast access to the tail.
+ */
+struct bio_list {
+ struct bio *head;
+ struct bio *tail;
+};
+
+static inline int bio_list_empty(const struct bio_list *bl)
+{
+ return bl->head == NULL;
+}
+
+static inline void bio_list_init(struct bio_list *bl)
+{
+ bl->head = bl->tail = NULL;
+}
+
+#define bio_list_for_each(bio, bl) \
+ for (bio = (bl)->head; bio; bio = bio->bi_next)
+
+static inline unsigned bio_list_size(const struct bio_list *bl)
+{
+ unsigned sz = 0;
+ struct bio *bio;
+
+ bio_list_for_each(bio, bl)
+ sz++;
+
+ return sz;
+}
+
+static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
+{
+ bio->bi_next = NULL;
+
+ if (bl->tail)
+ bl->tail->bi_next = bio;
+ else
+ bl->head = bio;
+
+ bl->tail = bio;
+}
+
+static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
+{
+ bio->bi_next = bl->head;
+
+ bl->head = bio;
+
+ if (!bl->tail)
+ bl->tail = bio;
+}
+
+static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
+{
+ if (!bl2->head)
+ return;
+
+ if (bl->tail)
+ bl->tail->bi_next = bl2->head;
+ else
+ bl->head = bl2->head;
+
+ bl->tail = bl2->tail;
+}
+
+static inline void bio_list_merge_head(struct bio_list *bl,
+ struct bio_list *bl2)
+{
+ if (!bl2->head)
+ return;
+
+ if (bl->head)
+ bl2->tail->bi_next = bl->head;
+ else
+ bl->tail = bl2->tail;
+
+ bl->head = bl2->head;
+}
+
+static inline struct bio *bio_list_peek(struct bio_list *bl)
+{
+ return bl->head;
+}
+
+static inline struct bio *bio_list_pop(struct bio_list *bl)
+{
+ struct bio *bio = bl->head;
+
+ if (bio) {
+ bl->head = bl->head->bi_next;
+ if (!bl->head)
+ bl->tail = NULL;
+
+ bio->bi_next = NULL;
+ }
+
+ return bio;
+}
+
+static inline struct bio *bio_list_get(struct bio_list *bl)
+{
+ struct bio *bio = bl->head;
+
+ bl->head = bl->tail = NULL;
+
+ return bio;
+}
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
@@ -519,9 +634,8 @@ static inline int bio_has_data(struct bio *bio)
#define bio_integrity(bio) (bio->bi_integrity != NULL)
-extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern void bio_integrity_free(struct bio *, struct bio_set *);
+extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -531,27 +645,21 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
-extern int bioset_integrity_create(struct bio_set *, int);
-extern void bioset_integrity_free(struct bio_set *);
-extern void bio_integrity_init_slab(void);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define bio_integrity(a) (0)
-#define bioset_integrity_create(a, b) (0)
#define bio_integrity_prep(a) (0)
#define bio_integrity_enabled(a) (0)
-#define bio_integrity_clone(a, b, c,d ) (0)
-#define bioset_integrity_free(a) do { } while (0)
-#define bio_integrity_free(a, b) do { } while (0)
+#define bio_integrity_clone(a, b, c) (0)
+#define bio_integrity_free(a) do { } while (0)
#define bio_integrity_endio(a, b) do { } while (0)
#define bio_integrity_advance(a, b) do { } while (0)
#define bio_integrity_trim(a, b, c) do { } while (0)
#define bio_integrity_split(a, b, c) do { } while (0)
#define bio_integrity_set_tag(a, b, c) do { } while (0)
#define bio_integrity_get_tag(a, b, c) do { } while (0)
-#define bio_integrity_init_slab(a) do { } while (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
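The bio_list helpers added above give remapping drivers a small FIFO of bios. A minimal usage sketch, with locking omitted and generic_make_request() assumed to be the eventual consumer:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_list pending;

static void example_setup(void)
{
	bio_list_init(&pending);
}

static void example_defer_bio(struct bio *bio)
{
	bio_list_add(&pending, bio);	/* append at the tail */
}

static void example_drain(void)
{
	struct bio *bio;

	/* Pop in FIFO order and resubmit each bio to the block layer. */
	while ((bio = bio_list_pop(&pending)) != NULL)
		generic_make_request(bio);
}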
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 61829139795..c05a29cb9bb 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -112,6 +112,25 @@ static inline unsigned fls_long(unsigned long l)
return fls64(l);
}
+/**
+ * __ffs64 - find first set bit in a 64 bit word
+ * @word: The 64 bit word
+ *
+ * On 64 bit arches this is a synonym for __ffs
+ * The result is not defined if no bits are set, so check that @word
+ * is non-zero before calling this.
+ */
+static inline unsigned long __ffs64(u64 word)
+{
+#if BITS_PER_LONG == 32
+ if (((u32)word) == 0UL)
+ return __ffs((u32)(word >> 32)) + 32;
+#elif BITS_PER_LONG != 64
+#error BITS_PER_LONG not 32 or 64
+#endif
+ return __ffs((unsigned long)word);
+}
+
#ifdef __KERNEL__
#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
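A small illustration of __ffs64(); as the kernel-doc above notes, the result is undefined for zero, so the caller guards first (the sentinel value 64 is this example's choice, not part of the API):

#include <linux/bitops.h>

static unsigned long example_first_set(u64 mask)
{
	if (!mask)
		return 64;		/* arbitrary "no bit set" sentinel */
	return __ffs64(mask);		/* example_first_set(0x100000000ULL) == 32 */
}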
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc84..8963d9149b5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
typedef void (rq_end_io_fn)(struct request *, int);
struct request_list {
+ /*
+ * count[], starved[], and wait[] are indexed by
+ * BLK_RW_SYNC/BLK_RW_ASYNC
+ */
int count[2];
int starved[2];
int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
REQ_TYPE_ATA_PC,
};
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,13 @@ enum rq_flag_bits {
__REQ_QUIET, /* don't worry about errors */
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
- __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
+ __REQ_RW_SYNC, /* request is sync (sync write or read) */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
- __REQ_UNPLUG, /* unplug queue on submission */
+ __REQ_NOIDLE, /* Don't anticipate more IO after this one */
+ __REQ_IO_STAT, /* account I/O stat */
__REQ_NR_BITS, /* stops here */
};
@@ -135,7 +145,8 @@ enum rq_flag_bits {
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
-#define REQ_UNPLUG (1 << __REQ_UNPLUG)
+#define REQ_NOIDLE (1 << __REQ_NOIDLE)
+#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define BLK_MAX_CDB 16
@@ -155,19 +166,9 @@ struct request {
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
- /* Maintain bio traversal state for part by part I/O submission.
- * hard_* are block layer internals, no driver should touch them!
- */
-
- sector_t sector; /* next sector to submit */
- sector_t hard_sector; /* next sector to complete */
- unsigned long nr_sectors; /* no. of sectors left to submit */
- unsigned long hard_nr_sectors; /* no. of sectors left to complete */
- /* no. of sectors left to submit in the current segment */
- unsigned int current_nr_sectors;
-
- /* no. of sectors left to complete in the current segment */
- unsigned int hard_cur_sectors;
+ /* the following two fields are internal, NEVER access directly */
+ sector_t __sector; /* sector cursor */
+ unsigned int __data_len; /* total data len */
struct bio *bio;
struct bio *biotail;
@@ -200,8 +201,8 @@ struct request {
unsigned short ioprio;
- void *special;
- char *buffer;
+ void *special; /* opaque pointer available for LLD use */
+ char *buffer; /* kaddr of the current segment if available */
int tag;
int errors;
@@ -215,10 +216,9 @@ struct request {
unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd;
- unsigned int data_len;
unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len;
- void *data;
+ unsigned int resid_len; /* residual count */
void *sense;
unsigned long deadline;
@@ -307,6 +307,26 @@ struct blk_cmd_filter {
struct kobject kobj;
};
+struct queue_limits {
+ unsigned long bounce_pfn;
+ unsigned long seg_boundary_mask;
+
+ unsigned int max_hw_sectors;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;
+
+ unsigned short logical_block_size;
+ unsigned short max_hw_segments;
+ unsigned short max_phys_segments;
+
+ unsigned char misaligned;
+ unsigned char no_cluster;
+};
+
struct request_queue
{
/*
@@ -358,7 +378,6 @@ struct request_queue
/*
* queue needs bounce pages for pages above this limit
*/
- unsigned long bounce_pfn;
gfp_t bounce_gfp;
/*
@@ -387,14 +406,6 @@ struct request_queue
unsigned int nr_congestion_off;
unsigned int nr_batching;
- unsigned int max_sectors;
- unsigned int max_hw_sectors;
- unsigned short max_phys_segments;
- unsigned short max_hw_segments;
- unsigned short hardsect_size;
- unsigned int max_segment_size;
-
- unsigned long seg_boundary_mask;
void *dma_drain_buffer;
unsigned int dma_drain_size;
unsigned int dma_pad_mask;
@@ -404,12 +415,14 @@ struct request_queue
struct list_head tag_busy_list;
unsigned int nr_sorted;
- unsigned int in_flight;
+ unsigned int in_flight[2];
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
+ struct queue_limits limits;
+
/*
* sg stuff
*/
@@ -438,8 +451,8 @@ struct request_queue
#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -511,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
__clear_bit(flag, &q->queue_flags);
}
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
@@ -589,6 +607,8 @@ enum {
blk_failfast_transport(rq) || \
blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
+#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)
+#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET)
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
@@ -611,32 +631,42 @@ enum {
#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
/*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync if it is either a read or a sync write
*/
-#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+ return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return rw_is_sync(rq->cmd_flags);
+}
+
#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
- return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+ if (sync)
+ return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+ return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- queue_flag_set(QUEUE_FLAG_READFULL, q);
+ if (sync)
+ queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
else
- queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+ queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
- if (rw == READ)
- queue_flag_clear(QUEUE_FLAG_READFULL, q);
+ if (sync)
+ queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
else
- queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+ queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}
@@ -729,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+ gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_plug_device(struct request_queue *);
@@ -745,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
/*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
- struct bio *bio);
-
-/*
* A queue has just exited congestion. Note this in the global counter of
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
@@ -775,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -808,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> 9;
+}
/*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes the given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends. __blk_end_request() must be called
+ * with the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
* blk_end_request() for parts of the original function.
* This prevents code duplication in drivers.
*/
-extern int blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
- unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
- unsigned int nr_bytes,
- int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
- unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
/*
* Access functions for manipulating queue properties
@@ -854,10 +916,21 @@ extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_default_limits(struct queue_limits *lim);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -944,19 +1017,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+ return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+ return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+ return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+ return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+ return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+ return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+ return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
int retval = 512;
- if (q && q->hardsect_size)
- retval = q->hardsect_size;
+ if (q && q->limits.logical_block_size)
+ retval = q->limits.logical_block_size;
return retval;
}
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+ return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+ return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+ return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+ return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+ if (q && q->limits.misaligned)
+ return -1;
+
+ if (q && q->limits.alignment_offset)
+ return q->limits.alignment_offset;
+
+ return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+ sector_t sector)
{
- return queue_hardsect_size(bdev_get_queue(bdev));
+ return ((sector << 9) - q->limits.alignment_offset)
+ & (q->limits.io_min - 1);
}
static inline int queue_dma_alignment(struct request_queue *q)
@@ -1086,6 +1227,8 @@ struct block_device_operations {
int (*direct_access) (struct block_device *, sector_t,
void **, unsigned long *);
int (*media_changed) (struct gendisk *);
+ unsigned long long (*set_capacity) (struct gendisk *,
+ unsigned long long);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
struct module *owner;
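A sketch of a simple request_fn built on the helpers declared above (blk_fetch_request(), the blk_rq_*() accessors and __blk_end_request_all()); error handling and the actual data transfer are elided, and the callback is assumed to run with the queue lock held, as request_fn callbacks do:

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		pr_debug("%s %u sectors at %llu\n",
			 rq_data_dir(rq) ? "write" : "read",
			 blk_rq_sectors(rq),
			 (unsigned long long)blk_rq_pos(rq));
		/* ...transfer blk_rq_bytes(rq) bytes for this request here... */
		__blk_end_request_all(rq, 0);
	}
}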
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88..7e4350ece0f 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,9 +116,9 @@ struct blk_io_trace {
* The remap event
*/
struct blk_io_trace_remap {
- __be32 device;
__be32 device_from;
- __be64 sector;
+ __be32 device_to;
+ __be64 sector_from;
};
enum {
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
struct blk_trace {
int trace_state;
struct rchan *rchan;
@@ -162,8 +165,9 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern int do_blk_trace_setup(struct request_queue *q,
- char *name, dev_t dev, struct blk_user_trace_setup *buts);
+extern int do_blk_trace_setup(struct request_queue *q, char *name,
+ dev_t dev, struct block_device *bdev,
+ struct blk_user_trace_setup *buts);
extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
/**
@@ -190,20 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
+extern int blk_trace_init_sysfs(struct device *dev);
+
+extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
-#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
-#define blk_trace_shutdown(q) do { } while (0)
-#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
-#define blk_add_driver_data(q, rq, data, len) do {} while (0)
-#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
-#define blk_trace_startstop(q, start) (-ENOTTY)
-#define blk_trace_remove(q) (-ENOTTY)
-#define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
+# define blk_trace_shutdown(q) do { } while (0)
+# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
+# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
+# define blk_trace_startstop(q, start) (-ENOTTY)
+# define blk_trace_remove(q) (-ENOTTY)
+# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+static inline int blk_trace_init_sysfs(struct device *dev)
+{
+ return 0;
+}
#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+
+static inline int blk_cmd_buf_len(struct request *rq)
+{
+ return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
+}
+
+extern void blk_dump_cmd(char *buf, struct request *rq);
+extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
+
+#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb525..bc3ab707369 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
#define BOOTMEM_DEFAULT 0
#define BOOTMEM_EXCLUSIVE (1<<0)
+extern int reserve_bootmem(unsigned long addr,
+ unsigned long size,
+ int flags);
extern int reserve_bootmem_node(pg_data_t *pgdat,
- unsigned long physaddr,
- unsigned long size,
- int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
+ unsigned long physaddr,
+ unsigned long size,
+ int flags);
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
unsigned long align,
unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
- unsigned long align,
- unsigned long goal);
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
- __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_nopanic(x) \
__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+ __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+ __alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages_node(pgdat, x) \
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
int flags);
@@ -144,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename,
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-/* Only NUMA needs hash distribution.
- * IA64 and x86_64 have sufficient vmalloc space.
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
*/
-#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64))
+#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
#define HASHDIST_DEFAULT 1
#else
#define HASHDIST_DEFAULT 0
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index cf0303a6061..ecb4730d086 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -1,12 +1,22 @@
#ifndef BSG_H
#define BSG_H
+#include <linux/types.h>
+
#define BSG_PROTOCOL_SCSI 0
#define BSG_SUB_PROTOCOL_SCSI_CMD 0
#define BSG_SUB_PROTOCOL_SCSI_TMF 1
#define BSG_SUB_PROTOCOL_SCSI_TRANSPORT 2
+/*
+ * For flags member below
+ * sg.h sg_io_hdr also has bits defined for its flags member. However,
+ * none of these bits are implemented/used by bsg. The bits below are
+ * allocated to not conflict with sg.h ones anyway.
+ */
+#define BSG_FLAG_Q_AT_TAIL 0x10 /* default, == 0 at this bit, is Q_AT_HEAD */
+
struct sg_io_v4 {
__s32 guard; /* [i] 'Q' to differentiate from v3 */
__u32 protocol; /* [i] 0 -> SCSI , .... */
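An illustrative, userspace-style use of the new BSG_FLAG_Q_AT_TAIL bit; the sg_io_v4 members other than guard and protocol are not shown in this hunk, so the .flags assignment below assumes the member named in the comment above:

#include <linux/bsg.h>
#include <string.h>

static void example_fill_header(struct sg_io_v4 *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->guard = 'Q';
	hdr->protocol = BSG_PROTOCOL_SCSI;
	hdr->flags = BSG_FLAG_Q_AT_TAIL;	/* assumed member; 0 here means queue at head */
}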
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd7ac793be1..16ed0284d78 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -155,6 +155,7 @@ void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -165,15 +166,8 @@ int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
void mark_buffer_async_write(struct buffer_head *bh);
-void invalidate_bdev(struct block_device *);
-int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-int fsync_bdev(struct block_device *);
-struct super_block *freeze_bdev(struct block_device *);
-int thaw_bdev(struct block_device *, struct super_block *);
-int fsync_super(struct super_block *);
-int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
@@ -204,6 +198,8 @@ extern int buffer_heads_over_limit;
void block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc, bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
unsigned long from);
@@ -223,7 +219,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
-int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -339,22 +335,10 @@ extern int __set_page_dirty_buffers(struct page *page);
static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
- return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
- return 0;
-}
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */
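A sketch of a filesystem ->writepage built on the new block_write_full_page_endio() with the end_buffer_async_write handler exported above; example_get_block is a hypothetical get_block_t the filesystem would supply:

#include <linux/buffer_head.h>

/* Hypothetical block-mapping callback, defined elsewhere by the filesystem. */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	/* Behaves like block_write_full_page(), but the end_io handler is explicit. */
	return block_write_full_page_endio(page, example_get_block, wbc,
					   end_buffer_async_write);
}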
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 54398d2c6d8..d276b5510c8 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -1,7 +1,6 @@
#ifndef _LINUX_BUG_H
#define _LINUX_BUG_H
-#include <linux/module.h>
#include <asm/bug.h>
enum bug_trap_type {
@@ -24,10 +23,6 @@ const struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
-int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
- struct module *);
-void module_bug_cleanup(struct module *);
-
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);
@@ -38,13 +33,6 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
{
return BUG_TRAP_TYPE_BUG;
}
-static inline int module_bug_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *mod)
-{
- return 0;
-}
-static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
#endif /* _LINUX_BUG_H */
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 7b5a2388ba6..2a5cd867c36 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -10,6 +10,7 @@
*/
#include <linux/device.h>
+#include <linux/kmemcheck.h>
#define C2PORT_NAME_LEN 32
@@ -20,8 +21,10 @@
/* Main struct */
struct c2port_ops;
struct c2port_device {
+ kmemcheck_bitfield_begin(flags);
unsigned int access:1;
unsigned int flash_access:1;
+ kmemcheck_bitfield_end(flags);
int id;
char name[C2PORT_NAME_LEN];
diff --git a/include/linux/can/Kbuild b/include/linux/can/Kbuild
index eff898aac02..8cb05aae661 100644
--- a/include/linux/can/Kbuild
+++ b/include/linux/can/Kbuild
@@ -1,3 +1,4 @@
header-y += raw.h
header-y += bcm.h
header-y += error.h
+header-y += netlink.h
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
new file mode 100644
index 00000000000..4a37a56f6cd
--- /dev/null
+++ b/include/linux/can/dev.h
@@ -0,0 +1,70 @@
+/*
+ * linux/can/dev.h
+ *
+ * Definitions for the CAN network device driver interface
+ *
+ * Copyright (C) 2006 Andrey Volkov <avolkov@varma-el.com>
+ * Varma Electronics Oy
+ *
+ * Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ */
+
+#ifndef CAN_DEV_H
+#define CAN_DEV_H
+
+#include <linux/can/netlink.h>
+#include <linux/can/error.h>
+
+/*
+ * CAN mode
+ */
+enum can_mode {
+ CAN_MODE_STOP = 0,
+ CAN_MODE_START,
+ CAN_MODE_SLEEP
+};
+
+/*
+ * CAN common private data
+ */
+#define CAN_ECHO_SKB_MAX 4
+
+struct can_priv {
+ struct can_device_stats can_stats;
+
+ struct can_bittiming bittiming;
+ struct can_bittiming_const *bittiming_const;
+ struct can_clock clock;
+
+ enum can_state state;
+ u32 ctrlmode;
+
+ int restart_ms;
+ struct timer_list restart_timer;
+
+ struct sk_buff *echo_skb[CAN_ECHO_SKB_MAX];
+
+ int (*do_set_bittiming)(struct net_device *dev);
+ int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+ int (*do_get_state)(const struct net_device *dev,
+ enum can_state *state);
+};
+
+struct net_device *alloc_candev(int sizeof_priv);
+void free_candev(struct net_device *dev);
+
+int open_candev(struct net_device *dev);
+void close_candev(struct net_device *dev);
+
+int register_candev(struct net_device *dev);
+void unregister_candev(struct net_device *dev);
+
+int can_restart_now(struct net_device *dev);
+void can_bus_off(struct net_device *dev);
+
+void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx);
+void can_get_echo_skb(struct net_device *dev, int idx);
+
+#endif /* CAN_DEV_H */
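A minimal sketch of registering a CAN device through the interface above; the 8 MHz clock, the 100 ms restart value and the placement of struct can_priv as the first member of the private area are assumptions of this example:

#include <linux/netdevice.h>
#include <linux/can/dev.h>

struct example_priv {
	struct can_priv can;		/* assumed to be the first member */
	void __iomem *regs;
};

static int example_register(void)
{
	struct net_device *dev;
	struct example_priv *priv;
	int err;

	dev = alloc_candev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->can.clock.freq = 8000000;	/* assumed oscillator frequency */
	priv->can.restart_ms = 100;	/* auto-restart delay after bus-off */

	err = register_candev(dev);
	if (err)
		free_candev(dev);
	return err;
}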
diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h
new file mode 100644
index 00000000000..9ecbb7871c0
--- /dev/null
+++ b/include/linux/can/netlink.h
@@ -0,0 +1,113 @@
+/*
+ * linux/can/netlink.h
+ *
+ * Definitions for the CAN netlink interface
+ *
+ * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#ifndef CAN_NETLINK_H
+#define CAN_NETLINK_H
+
+#include <linux/types.h>
+
+/*
+ * CAN bit-timing parameters
+ *
+ * For further information, please read chapter "8 BIT TIMING
+ * REQUIREMENTS" of the "Bosch CAN Specification version 2.0"
+ * at http://www.semiconductors.bosch.de/pdf/can2spec.pdf.
+ */
+struct can_bittiming {
+ __u32 bitrate; /* Bit-rate in bits/second */
+ __u32 sample_point; /* Sample point in one-tenth of a percent */
+ __u32 tq; /* Time quanta (TQ) in nanoseconds */
+ __u32 prop_seg; /* Propagation segment in TQs */
+ __u32 phase_seg1; /* Phase buffer segment 1 in TQs */
+ __u32 phase_seg2; /* Phase buffer segment 2 in TQs */
+ __u32 sjw; /* Synchronisation jump width in TQs */
+ __u32 brp; /* Bit-rate prescaler */
+};
+
+/*
+ * CAN hardware-dependent bit-timing constant
+ *
+ * Used for calculating and checking bit-timing parameters
+ */
+struct can_bittiming_const {
+ char name[16]; /* Name of the CAN controller hardware */
+ __u32 tseg1_min; /* Time segment 1 = prop_seg + phase_seg1 */
+ __u32 tseg1_max;
+ __u32 tseg2_min; /* Time segment 2 = phase_seg2 */
+ __u32 tseg2_max;
+ __u32 sjw_max; /* Synchronisation jump width */
+ __u32 brp_min; /* Bit-rate prescaler */
+ __u32 brp_max;
+ __u32 brp_inc;
+};
+
+/*
+ * CAN clock parameters
+ */
+struct can_clock {
+ __u32 freq; /* CAN system clock frequency in Hz */
+};
+
+/*
+ * CAN operational and error states
+ */
+enum can_state {
+ CAN_STATE_ERROR_ACTIVE = 0, /* RX/TX error count < 96 */
+ CAN_STATE_ERROR_WARNING, /* RX/TX error count < 128 */
+ CAN_STATE_ERROR_PASSIVE, /* RX/TX error count < 256 */
+ CAN_STATE_BUS_OFF, /* RX/TX error count >= 256 */
+ CAN_STATE_STOPPED, /* Device is stopped */
+ CAN_STATE_SLEEPING, /* Device is sleeping */
+ CAN_STATE_MAX
+};
+
+/*
+ * CAN controller mode
+ */
+struct can_ctrlmode {
+ __u32 mask;
+ __u32 flags;
+};
+
+#define CAN_CTRLMODE_LOOPBACK 0x1 /* Loopback mode */
+#define CAN_CTRLMODE_LISTENONLY 0x2 /* Listen-only mode */
+#define CAN_CTRLMODE_3_SAMPLES 0x4 /* Triple sampling mode */
+
+/*
+ * CAN device statistics
+ */
+struct can_device_stats {
+ __u32 bus_error; /* Bus errors */
+ __u32 error_warning; /* Changes to error warning state */
+ __u32 error_passive; /* Changes to error passive state */
+ __u32 bus_off; /* Changes to bus off state */
+ __u32 arbitration_lost; /* Arbitration lost errors */
+ __u32 restarts; /* CAN controller re-starts */
+};
+
+/*
+ * CAN netlink interface
+ */
+enum {
+ IFLA_CAN_UNSPEC,
+ IFLA_CAN_BITTIMING,
+ IFLA_CAN_BITTIMING_CONST,
+ IFLA_CAN_CLOCK,
+ IFLA_CAN_STATE,
+ IFLA_CAN_CTRLMODE,
+ IFLA_CAN_RESTART_MS,
+ IFLA_CAN_RESTART,
+ __IFLA_CAN_MAX
+};
+
+#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
+
+#endif /* CAN_NETLINK_H */
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
new file mode 100644
index 00000000000..01ee2aeb048
--- /dev/null
+++ b/include/linux/can/platform/sja1000.h
@@ -0,0 +1,35 @@
+#ifndef _CAN_PLATFORM_SJA1000_H_
+#define _CAN_PLATFORM_SJA1000_H_
+
+/* clock divider register */
+#define CDR_CLKOUT_MASK 0x07
+#define CDR_CLK_OFF 0x08 /* Clock off (CLKOUT pin) */
+#define CDR_RXINPEN 0x20 /* TX1 output is RX irq output */
+#define CDR_CBP 0x40 /* CAN input comparator bypass */
+#define CDR_PELICAN 0x80 /* PeliCAN mode */
+
+/* output control register */
+#define OCR_MODE_BIPHASE 0x00
+#define OCR_MODE_TEST 0x01
+#define OCR_MODE_NORMAL 0x02
+#define OCR_MODE_CLOCK 0x03
+#define OCR_MODE_MASK 0x07
+#define OCR_TX0_INVERT 0x04
+#define OCR_TX0_PULLDOWN 0x08
+#define OCR_TX0_PULLUP 0x10
+#define OCR_TX0_PUSHPULL 0x18
+#define OCR_TX1_INVERT 0x20
+#define OCR_TX1_PULLDOWN 0x40
+#define OCR_TX1_PULLUP 0x80
+#define OCR_TX1_PUSHPULL 0xc0
+#define OCR_TX_MASK 0xfc
+#define OCR_TX_SHIFT 2
+
+struct sja1000_platform_data {
+ u32 clock; /* CAN bus oscillator frequency in Hz */
+
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+};
+
+#endif /* !_CAN_PLATFORM_SJA1000_H_ */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 4864a43b2b4..c3021105edc 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -377,7 +377,21 @@ struct cpu_vfs_cap_data {
#define CAP_FOR_EACH_U32(__capi) \
for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+/*
+ * CAP_FS_MASK and CAP_NFSD_MASKS:
+ *
+ * The fs mask is all the privileges that fsuid==0 historically meant.
+ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
+ *
+ * It has never meant setting security.* and trusted.* xattrs.
+ *
+ * We could also define fsmask as follows:
+ * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
+ * 2. The security.* and trusted.* xattrs are fs-related MAC permissions
+ */
+
# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
+ | CAP_TO_MASK(CAP_MKNOD) \
| CAP_TO_MASK(CAP_DAC_OVERRIDE) \
| CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
| CAP_TO_MASK(CAP_FOWNER) \
@@ -392,11 +406,12 @@ struct cpu_vfs_cap_data {
# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
# define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
+# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
+ CAP_FS_MASK_B1 } })
# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_SYS_RESOURCE) \
- | CAP_TO_MASK(CAP_MKNOD), \
- CAP_FS_MASK_B1 } })
+ | CAP_TO_MASK(CAP_SYS_RESOURCE), \
+ CAP_FS_MASK_B1 } })
#endif /* _KERNEL_CAPABILITY_U32S != 2 */
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
new file mode 100644
index 00000000000..63bc9a4d292
--- /dev/null
+++ b/include/linux/cb710.h
@@ -0,0 +1,231 @@
+/*
+ * cb710/cb710.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_DRIVER_H
+#define LINUX_CB710_DRIVER_H
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+
+struct cb710_slot;
+
+typedef int (*cb710_irq_handler_t)(struct cb710_slot *);
+
+/* per-virtual-slot structure */
+struct cb710_slot {
+ struct platform_device pdev;
+ void __iomem *iobase;
+ cb710_irq_handler_t irq_handler;
+};
+
+/* per-device structure */
+struct cb710_chip {
+ struct pci_dev *pdev;
+ void __iomem *iobase;
+ unsigned platform_id;
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ atomic_t slot_refs_count;
+#endif
+ unsigned slot_mask;
+ unsigned slots;
+ spinlock_t irq_lock;
+ struct cb710_slot slot[0];
+};
+
+/* NOTE: cb710_chip.slots is modified only during device init/exit and
+ * they are all serialized wrt themselves */
+
+/* cb710_chip.slot_mask values */
+#define CB710_SLOT_MMC 1
+#define CB710_SLOT_MS 2
+#define CB710_SLOT_SM 4
+
+/* slot port accessors - so the logic is more clear in the code */
+#define CB710_PORT_ACCESSORS(t) \
+static inline void cb710_write_port_##t(struct cb710_slot *slot, \
+ unsigned port, u##t value) \
+{ \
+ iowrite##t(value, slot->iobase + port); \
+} \
+ \
+static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \
+ unsigned port) \
+{ \
+ return ioread##t(slot->iobase + port); \
+} \
+ \
+static inline void cb710_modify_port_##t(struct cb710_slot *slot, \
+ unsigned port, u##t set, u##t clear) \
+{ \
+ iowrite##t( \
+ (ioread##t(slot->iobase + port) & ~clear)|set, \
+ slot->iobase + port); \
+}
+
+CB710_PORT_ACCESSORS(8)
+CB710_PORT_ACCESSORS(16)
+CB710_PORT_ACCESSORS(32)
+
+void cb710_pci_update_config_reg(struct pci_dev *pdev,
+ int reg, uint32_t and, uint32_t xor);
+void cb710_set_irq_handler(struct cb710_slot *slot,
+ cb710_irq_handler_t handler);
+
+/* some device struct walking */
+
+static inline struct cb710_slot *cb710_pdev_to_slot(
+ struct platform_device *pdev)
+{
+ return container_of(pdev, struct cb710_slot, pdev);
+}
+
+static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot)
+{
+ return dev_get_drvdata(slot->pdev.dev.parent);
+}
+
+static inline struct device *cb710_slot_dev(struct cb710_slot *slot)
+{
+ return &slot->pdev.dev;
+}
+
+static inline struct device *cb710_chip_dev(struct cb710_chip *chip)
+{
+ return &chip->pdev->dev;
+}
+
+/* debugging aids */
+
+#ifdef CONFIG_CB710_DEBUG
+void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
+#else
+#define cb710_dump_regs(c, d) do {} while (0)
+#endif
+
+#define CB710_DUMP_REGS_MMC 0x0F
+#define CB710_DUMP_REGS_MS 0x30
+#define CB710_DUMP_REGS_SM 0xC0
+#define CB710_DUMP_REGS_ALL 0xFF
+#define CB710_DUMP_REGS_MASK 0xFF
+
+#define CB710_DUMP_ACCESS_8 0x100
+#define CB710_DUMP_ACCESS_16 0x200
+#define CB710_DUMP_ACCESS_32 0x400
+#define CB710_DUMP_ACCESS_ALL 0x700
+#define CB710_DUMP_ACCESS_MASK 0x700
+
+#endif /* LINUX_CB710_DRIVER_H */
+/*
+ * cb710/sgbuf2.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_SG_H
+#define LINUX_CB710_SG_H
+
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+/**
+ * cb710_sg_miter_stop_writing - stop mapping iteration after writing
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been started
+ * using sg_miter_start(). A stopped iteration can be
+ * resumed by calling sg_miter_next() on it. This is useful when
+ * resources (kmap) need to be released during iteration.
+ *
+ * This is a convenience wrapper that will be optimized out for arches
+ * that don't need flush_kernel_dcache_page().
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
+{
+ if (miter->page)
+ flush_kernel_dcache_page(miter->page);
+ sg_miter_stop(miter);
+}
+
+/*
+ * 32-bit PIO mapping sg iterator
+ *
+ * Hides scatterlist access issues - fragment boundaries, alignment, page
+ * mapping - for drivers using 32-bit-word-at-a-time-PIO (ie. PCI devices
+ * without DMA support).
+ *
+ * Best-case reading (transfer from device):
+ * sg_miter_start();
+ * cb710_sg_dwiter_write_from_io();
+ * cb710_sg_miter_stop_writing();
+ *
+ * Best-case writing (transfer to device):
+ * sg_miter_start();
+ * cb710_sg_dwiter_read_to_io();
+ * sg_miter_stop();
+ */
+
+uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter);
+void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data);
+
+/**
+ * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port
+ * @miter: sg mapping iter
+ * @port: PIO port - IO or MMIO address
+ * @count: number of 32-bit words to transfer
+ *
+ * Description:
+ * Reads @count 32-bit words from register @port and stores them in
+ * the buffer iterated by @miter. Data that would overflow the buffer
+ * is silently ignored. Iterator is advanced by 4*@count bytes
+ * or to the buffer's end whichever is closer.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter,
+ void __iomem *port, size_t count)
+{
+ while (count-- > 0)
+ cb710_sg_dwiter_write_next_block(miter, ioread32(port));
+}
+
+/**
+ * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer
+ * @miter: sg mapping iter
+ * @port: PIO port - IO or MMIO address
+ * @count: number of 32-bit words to transfer
+ *
+ * Description:
+ * Writes @count 32-bit words to register @port from buffer iterated
+ * through @miter. If buffer ends before @count words are written
+ * missing data is replaced by zeroes. @miter is advanced by 4*@count
+ * bytes or to the buffer's end whichever is closer.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter,
+ void __iomem *port, size_t count)
+{
+ while (count-- > 0)
+ iowrite32(cb710_sg_dwiter_read_next_block(miter), port);
+}
+
+#endif /* LINUX_CB710_SG_H */
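A short sketch using the port accessors generated by CB710_PORT_ACCESSORS() in cb710.h above; the register offsets and bit values are purely illustrative:

#include <linux/cb710.h>

#define EXAMPLE_CTL_PORT	0x08	/* hypothetical control register offset */
#define EXAMPLE_DATA_PORT	0x10	/* hypothetical data register offset */

static u32 example_fetch_word(struct cb710_slot *slot)
{
	/* Set bit 0 and clear bit 7 of the control port, then read one word. */
	cb710_modify_port_8(slot, EXAMPLE_CTL_PORT, 0x01, 0x80);
	return cb710_read_port_32(slot, EXAMPLE_DATA_PORT);
}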
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb4591977b0..f389e319a45 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,6 +28,8 @@ int cdev_add(struct cdev *, dev_t, unsigned);
void cdev_del(struct cdev *);
+int cdev_index(struct inode *inode);
+
void cd_forget(struct inode *);
extern struct backing_dev_info directly_mappable_cdev_bdi;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 499900d0cee..665fa70e409 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -15,6 +15,7 @@
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
+#include <linux/idr.h>
#ifdef CONFIG_CGROUPS
@@ -22,6 +23,7 @@ struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
+struct css_id;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
- /* The cgroup that this subsystem is attached to. Useful
+ /*
+ * The cgroup that this subsystem is attached to. Useful
* for subsystems that want to know about the cgroup
- * hierarchy structure */
+ * hierarchy structure
+ */
struct cgroup *cgroup;
- /* State maintained by the cgroup system to allow subsystems
+ /*
+ * State maintained by the cgroup system to allow subsystems
* to be "busy". Should be accessed via css_get(),
- * css_tryget() and and css_put(). */
+ * css_tryget() and css_put().
+ */
atomic_t refcnt;
unsigned long flags;
+ /* ID for this css, if possible */
+ struct css_id *id;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
enum {
/* Control Group is dead */
CGRP_REMOVED,
- /* Control Group has previously had a child cgroup or a task,
- * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
+ /*
+ * Control Group has previously had a child cgroup or a task,
+ * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
+ */
CGRP_RELEASABLE,
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
+ /*
+ * A thread in rmdir() is waiting for this cgroup.
+ */
+ CGRP_WAIT_ON_RMDIR,
};
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
- /* count users of this cgroup. >0 means busy, but doesn't
- * necessarily indicate the number of tasks in the
- * cgroup */
+ /*
+ * count users of this cgroup. >0 means busy, but doesn't
+ * necessarily indicate the number of tasks in the cgroup
+ */
atomic_t count;
/*
@@ -142,7 +157,7 @@ struct cgroup {
struct list_head sibling; /* my parent's children */
struct list_head children; /* my children */
- struct cgroup *parent; /* my parent */
+ struct cgroup *parent; /* my parent */
struct dentry *dentry; /* cgroup fs entry, RCU protected */
/* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
struct rcu_head rcu_head;
};
-/* A css_set is a structure holding pointers to a set of
+/*
+ * A css_set is a structure holding pointers to a set of
* cgroup_subsys_state objects. This saves space in the task struct
* object and speeds up fork()/exit(), since a single inc/dec and a
- * list_add()/del() can bump the reference count on the entire
- * cgroup set for a task.
+ * list_add()/del() can bump the reference count on the entire cgroup
+ * set for a task.
*/
struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
void *state;
};
-/* struct cftype:
- *
- * The files in the cgroup filesystem mostly have a very simple read/write
- * handling, some common function will take care of it. Nevertheless some cases
- * (read tasks) are special and therefore I define this structure for every
- * kind of file.
- *
+/*
+ * struct cftype: handler definitions for cgroup control files
*
* When reading/writing to a file:
* - the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
#define MAX_CFTYPE_NAME 64
struct cftype {
- /* By convention, the name should begin with the name of the
- * subsystem, followed by a period */
+ /*
+ * By convention, the name should begin with the name of the
+ * subsystem, followed by a period
+ */
char name[MAX_CFTYPE_NAME];
int private;
+ /*
+ * If not 0, file mode is set to this value, otherwise it will
+ * be figured out automatically
+ */
+ mode_t mode;
/*
* If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
void (*process_task)(struct task_struct *p,
struct cgroup_scanner *scan);
struct ptr_heap *heap;
+ void *data;
};
-/* Add a new file to the given cgroup directory. Should only be
- * called by subsystems from within a populate() method */
+/*
+ * Add a new file to the given cgroup directory. Should only be
+ * called by subsystems from within a populate() method
+ */
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
const struct cftype *cft);
-/* Add a set of new files to the given cgroup directory. Should
- * only be called by subsystems from within a populate() method */
+/*
+ * Add a set of new files to the given cgroup directory. Should
+ * only be called by subsystems from within a populate() method
+ */
int cgroup_add_files(struct cgroup *cgrp,
struct cgroup_subsys *subsys,
const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int cgroup_task_count(const struct cgroup *cgrp);
-/* Return true if the cgroup is a descendant of the current cgroup */
-int cgroup_is_descendant(const struct cgroup *cgrp);
+/* Return true if cgrp is a descendant of the task's cgroup */
+int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
-/* Control Group subsystem type. See Documentation/cgroups.txt for details */
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
- void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+ int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*can_attach)(struct cgroup_subsys *ss,
struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
int active;
int disabled;
int early_init;
+ /*
+ * True if this subsys uses ID. ID is not available before cgroup_init()
+ * (i.e. it is not available during early_init).
+ */
+ bool use_id;
#define MAX_CGROUP_TYPE_NAMELEN 32
const char *name;
@@ -386,6 +417,9 @@ struct cgroup_subsys {
*/
struct cgroupfs_root *root;
struct list_head sibling;
+ /* used when use_id == true */
+ struct idr idr;
+ spinlock_t id_lock;
};
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
struct list_head *task;
};
-/* To iterate across the tasks in a cgroup:
+/*
+ * To iterate across the tasks in a cgroup:
*
 * 1) call cgroup_iter_start to initialize an iterator
*
@@ -428,9 +463,10 @@ struct cgroup_iter {
*
* 3) call cgroup_iter_end() to destroy the iterator.
*
- * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset.
- * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
- * callback, but not while calling the process_task() callback.
+ * Or, call cgroup_scan_tasks() to iterate through every task in a
+ * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
+ * the test_task() callback, but not while calling the process_task()
+ * callback.
*/
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
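
The iterator calls above follow the 1)-3) recipe in the preceding comment. A minimal sketch of that pattern, assuming the caller already has a valid struct cgroup pointer; the helper name and the printk output are illustrative only:

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Hypothetical walk over every task attached to @cgrp. */
static void example_walk_cgroup_tasks(struct cgroup *cgrp)
{
	struct cgroup_iter it;
	struct task_struct *tsk;

	cgroup_iter_start(cgrp, &it);	/* css_set_lock is held from here... */
	while ((tsk = cgroup_iter_next(cgrp, &it)) != NULL)
		printk(KERN_DEBUG "pid %d is in this cgroup\n",
		       task_pid_nr(tsk));
	cgroup_iter_end(cgrp, &it);	/* ...until here */
}
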
+/*
+ * A CSS ID is an ID for a cgroup_subsys_state struct under a subsys. It only
+ * works if cgroup_subsys.use_id == true, and it can be used for lookup and
+ * scanning. A CSS ID is assigned automatically at cgroup allocation (create)
+ * and removed when the subsys calls free_css_id(), because the lifetime of a
+ * cgroup_subsys_state is the subsys's responsibility.
+ *
+ * The lookup and scanning functions should be called under rcu_read_lock();
+ * taking cgroup_mutex()/hierarchy_mutex() is not necessary for these calls.
+ * However, the css returned by these routines may be "not populated yet" or
+ * "being destroyed". The caller should check the css and cgroup status.
+ */
+
+/*
+ * Typically called at ->destroy(), or wherever the subsys frees
+ * cgroup_subsys_state.
+ */
+void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
+
+/* Find a cgroup_subsys_state which has given ID */
+
+struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
+
+/*
+ * Get a cgroup whose id is greater than or equal to id, under the tree of root.
+ * Returns a cgroup_subsys_state or NULL.
+ */
+struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
+ struct cgroup_subsys_state *root, int *foundid);
+
+/* Returns true if root is ancestor of cg */
+bool css_is_ancestor(struct cgroup_subsys_state *cg,
+ const struct cgroup_subsys_state *root);
+
+/* Get id and depth of css */
+unsigned short css_id(struct cgroup_subsys_state *css);
+unsigned short css_depth(struct cgroup_subsys_state *css);
+
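
A hedged sketch of how a subsystem with use_id == true might walk its css tree with the ID helpers declared above; the helper name and the pr_debug output are illustrative, and a real caller would pin each css (e.g. with css_tryget()) before doing real work with it:

#include <linux/cgroup.h>
#include <linux/rcupdate.h>

static void example_scan_css_tree(struct cgroup_subsys *ss,
				  struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;
	int nextid = 1, found;

	rcu_read_lock();
	for (;;) {
		css = css_get_next(ss, nextid, root, &found);
		if (!css)
			break;
		/* css may be half-created or dying; check before real use. */
		pr_debug("css id=%u depth=%u\n", css_id(css), css_depth(css));
		nextid = found + 1;	/* resume after the last hit */
	}
	rcu_read_unlock();
}
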
#else /* !CONFIG_CGROUPS */
static inline int cgroup_init_early(void) { return 0; }
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 778777316ea..1d37f42ac29 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -125,4 +125,34 @@ int clk_set_parent(struct clk *clk, struct clk *parent);
*/
struct clk *clk_get_parent(struct clk *clk);
+/**
+ * clk_get_sys - get a clock based upon the device name
+ * @dev_id: device name
+ * @con_id: connection ID
+ *
+ * Returns a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev_id and @con_id to determine the clock consumer, and
+ * thereby the clock producer. In contrast to clk_get() this function
+ * takes the device name instead of the device itself for identification.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_get_sys should not be called from within interrupt context.
+ */
+struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+
+/**
+ * clk_add_alias - add a new clock alias
+ * @alias: name for clock alias
+ * @alias_dev_name: device name
+ * @id: platform specific clock name
+ * @dev: device
+ *
+ * Allows using generic clock names for drivers by adding a new alias.
+ * Assumes clkdev, see clkdev.h for more info.
+ */
+int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ struct device *dev);
+
#endif
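
A brief sketch of the two helpers documented above. The "uart0"/"apb_pclk" names, the NULL device passed to clk_add_alias(), and the error-handling policy are illustrative assumptions; clk_enable()/clk_disable()/clk_put() are the pre-existing clk API:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_usage(struct device *dev)
{
	struct clk *clk;
	int ret;

	/* Look the clock up by device *name* instead of by struct device. */
	clk = clk_get_sys("uart0", "apb_pclk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret) {
		clk_put(clk);
		return ret;
	}
	/* ... program the peripheral ... */
	clk_disable(clk);
	clk_put(clk);

	/* Let a driver using a generic name find the same clock later. */
	return clk_add_alias("uart_clk", dev_name(dev), "apb_pclk", NULL);
}
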
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 3a1dbba4d3a..20a100fe2b4 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -143,3 +143,12 @@ extern void clockevents_notify(unsigned long reason, void *arg);
#endif
#endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+extern ktime_t clockevents_get_next_event(int cpu);
+#else
+static inline ktime_t clockevents_get_next_event(int cpu)
+{
+ return (ktime_t) { .tv64 = KTIME_MAX };
+}
+#endif
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index f88d32f8ff7..c56457c8334 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -22,8 +22,109 @@ typedef u64 cycle_t;
struct clocksource;
/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ * Provides completely state-free accessors to the underlying hardware.
+ * Depending on which hardware it reads, the cycle counter may wrap
+ * around quickly. Locking rules (if necessary) have to be defined
+ * by the implementor and user of specific instances of this API.
+ *
+ * @read: returns the current cycle value
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters,
+ * see CLOCKSOURCE_MASK() helper macro
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+ cycle_t (*read)(const struct cyclecounter *cc);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * Contains the state needed by timecounter_read() to detect
+ * cycle counter wrap around. Initialize with
+ * timecounter_init(). Also used to convert cycle counts into the
+ * corresponding nanosecond counts with timecounter_cyc2time(). Users
+ * of this code are responsible for initializing the underlying
+ * cycle counter hardware, locking issues and reading the time
+ * more often than the cycle counter wraps around. The nanosecond
+ * counter will only wrap around after ~585 years.
+ *
+ * @cc: the cycle counter used by this instance
+ * @cycle_last: most recent cycle counter value seen by
+ * timecounter_read()
+ * @nsec: continuously increasing count
+ */
+struct timecounter {
+ const struct cyclecounter *cc;
+ cycle_t cycle_last;
+ u64 nsec;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc: Pointer to cycle counter.
+ * @cycles: Cycles
+ *
+ * XXX - This could use some mult_lxl_ll() asm optimization. Same code
+ * as in cyc2ns, but with unsigned result.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+ cycle_t cycles)
+{
+ u64 ret = (u64)cycles;
+ ret = (ret * cc->mult) >> cc->shift;
+ return ret;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc: Pointer to time counter which is to be initialized/reset
+ * @cc: A cycle counter, ready to be used.
+ * @start_tstamp: Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ * plus the initial time stamp
+ * @tc: Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ * time base as values returned by
+ * timecounter_read()
+ * @tc: Pointer to time counter.
+ * @cycle: a value returned by tc->cc->read()
+ *
+ * Cycle counts are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cc->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp);
+
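
To make the cyclecounter/timecounter pairing concrete, a hedged sketch of a driver wrapping a hypothetical 32-bit free-running counter that ticks at 10 MHz; the mapped register, the mult/shift values, and the helper names are illustrative assumptions:

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *my_counter_reg;	/* hypothetical, ioremap()ed elsewhere */

static cycle_t my_cc_read(const struct cyclecounter *cc)
{
	return readl(my_counter_reg);	/* 32-bit free-running counter */
}

static struct cyclecounter my_cc = {
	.read	= my_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.mult	= 100,			/* 10 MHz -> 100 ns per cycle ... */
	.shift	= 0,			/* ... with no fractional part */
};

static struct timecounter my_tc;

static void my_timestamps_init(u64 boot_time_ns)
{
	/* Snapshot the counter and anchor it to boot_time_ns. */
	timecounter_init(&my_tc, &my_cc, boot_time_ns);
}

static u64 my_get_time_ns(void)
{
	/* Must be called before the 32-bit counter can wrap twice. */
	return timecounter_read(&my_tc);
}
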
+/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
+ * This is the structure used for system time.
*
* @name: ptr to clocksource name
* @list: list head for registration
@@ -42,7 +143,9 @@ struct clocksource;
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
- * @read: returns a cycle value
+ * @read: returns a cycle value, passes clocksource as argument
+ * @enable: optional function to enable the clocksource
+ * @disable: optional function to disable the clocksource
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier (adjusted by NTP)
@@ -61,7 +164,9 @@ struct clocksource {
char *name;
struct list_head list;
int rating;
- cycle_t (*read)(void);
+ cycle_t (*read)(struct clocksource *cs);
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
cycle_t mask;
u32 mult;
u32 mult_orig;
@@ -170,7 +275,42 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
*/
static inline cycle_t clocksource_read(struct clocksource *cs)
{
- return cs->read();
+ return cs->read(cs);
+}
+
+/**
+ * clocksource_enable: - enable clocksource
+ * @cs: pointer to clocksource
+ *
+ * Enables the specified clocksource. The clocksource callback
+ * function should start up the hardware and set up the mult and related
+ * fields of struct clocksource to reflect the hardware's capabilities.
+ */
+static inline int clocksource_enable(struct clocksource *cs)
+{
+ int ret = 0;
+
+ if (cs->enable)
+ ret = cs->enable(cs);
+
+ /* save mult_orig on enable */
+ cs->mult_orig = cs->mult;
+
+ return ret;
+}
+
+/**
+ * clocksource_disable: - disable clocksource
+ * @cs: pointer to clocksource
+ *
+ * Disables the specified clocksource. The clocksource callback
+ * function should power down the now unused hardware block to
+ * save power.
+ */
+static inline void clocksource_disable(struct clocksource *cs)
+{
+ if (cs->disable)
+ cs->disable(cs);
}
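
A hedged sketch of a clocksource that uses the new read(cs) signature together with the optional enable()/disable() hooks. The my_timer_base register layout is hypothetical, and the rating/shift values and clocksource_register() call follow the usual registration pattern rather than anything mandated by this patch:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *my_timer_base;	/* hypothetical, mapped by the driver */

static cycle_t my_cs_read(struct clocksource *cs)
{
	return readl(my_timer_base);		/* free-running count register */
}

static int my_cs_enable(struct clocksource *cs)
{
	writel(1, my_timer_base + 0x4);		/* hypothetical "run" bit */
	return 0;
}

static void my_cs_disable(struct clocksource *cs)
{
	writel(0, my_timer_base + 0x4);		/* power the block down */
}

static struct clocksource my_cs = {
	.name		= "my_timer",
	.rating		= 200,
	.read		= my_cs_read,
	.enable		= my_cs_enable,
	.disable	= my_cs_disable,
	.mask		= CLOCKSOURCE_MASK(32),
};

static int __init my_cs_init(void)
{
	/* 24 MHz input; shift of 20 is a typical precision/range trade-off. */
	my_cs.shift = 20;
	my_cs.mult  = clocksource_hz2mult(24000000, my_cs.shift);
	return clocksource_register(&my_cs);
}
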
/**
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
index 605ebe24bb2..72bfefdbd76 100644
--- a/include/linux/cm4000_cs.h
+++ b/include/linux/cm4000_cs.h
@@ -1,6 +1,8 @@
#ifndef _CM4000_H_
#define _CM4000_H_
+#include <linux/types.h>
+
#define MAX_ATR 33
#define CM4000_MAX_DEV 4
@@ -10,9 +12,9 @@
* not to break compilation of userspace apps. -HW */
typedef struct atreq {
- int32_t atr_len;
+ __s32 atr_len;
unsigned char atr[64];
- int32_t power_act;
+ __s32 power_act;
unsigned char bIFSD;
unsigned char bIFSC;
} atreq_t;
@@ -22,13 +24,13 @@ typedef struct atreq {
* member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
* will lay out the structure members differently than the 64bit kernel.
*
- * I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t".
+ * I've changed "ptsreq.protocol" from "unsigned long" to "__u32".
* On 32bit this will make no difference. With 64bit kernels, it will make
* 32bit apps work, too.
*/
typedef struct ptsreq {
- u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/
+ __u32 protocol; /*T=0: 2^0, T=1: 2^1*/
unsigned char flags;
unsigned char pts1;
unsigned char pts2;
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 1c86d65bc4b..b8125b2eb66 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -65,20 +65,20 @@ struct proc_event {
} ack;
struct fork_proc_event {
- pid_t parent_pid;
- pid_t parent_tgid;
- pid_t child_pid;
- pid_t child_tgid;
+ __kernel_pid_t parent_pid;
+ __kernel_pid_t parent_tgid;
+ __kernel_pid_t child_pid;
+ __kernel_pid_t child_tgid;
} fork;
struct exec_proc_event {
- pid_t process_pid;
- pid_t process_tgid;
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
} exec;
struct id_proc_event {
- pid_t process_pid;
- pid_t process_tgid;
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
union {
__u32 ruid; /* task uid */
__u32 rgid; /* task gid */
@@ -90,8 +90,8 @@ struct proc_event {
} id;
struct exit_proc_event {
- pid_t process_pid;
- pid_t process_tgid;
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
__u32 exit_code, exit_signal;
} exit;
} event_data;
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 07ae8f84605..5b5d4731f95 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -6,6 +6,7 @@
#define CODA_PSDEV_MAJOR 67
#define MAX_CODADEVS 5 /* how many do we allow */
+#ifdef __KERNEL__
struct kstatfs;
/* communication pending/processing queues */
@@ -24,7 +25,6 @@ static inline struct venus_comm *coda_vcp(struct super_block *sb)
return (struct venus_comm *)((sb)->s_fs_info);
}
-
/* upcalls */
int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
int venus_getattr(struct super_block *sb, struct CodaFid *fid,
@@ -64,6 +64,12 @@ int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
int venus_fsync(struct super_block *sb, struct CodaFid *fid);
int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
+/*
+ * Statistics
+ */
+
+extern struct venus_comm coda_comms[];
+#endif /* __KERNEL__ */
/* messages between coda filesystem in kernel and Venus */
struct upc_req {
@@ -82,11 +88,4 @@ struct upc_req {
#define REQ_WRITE 0x4
#define REQ_ABORT 0x8
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-
#endif
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
index ac6d9a43e08..5dcfb944b6c 100644
--- a/include/linux/com20020.h
+++ b/include/linux/com20020.h
@@ -29,6 +29,7 @@
int com20020_check(struct net_device *dev);
int com20020_found(struct net_device *dev, int shared);
+extern const struct net_device_ops com20020_netdev_ops;
/* The number of low I/O ports used by the card. */
#define ARCNET_TOTAL_SIZE 8
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 3fd2194ff57..af931ee43dd 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -125,6 +125,13 @@ struct compat_dirent {
char d_name[256];
};
+struct compat_ustat {
+ compat_daddr_t f_tfree;
+ compat_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
@@ -178,11 +185,18 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage ssize_t compat_sys_readv(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
asmlinkage ssize_t compat_sys_writev(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
+asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, u32 pos_low, u32 pos_high);
int compat_do_execve(char * filename, compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs * regs);
@@ -208,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
+long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
static inline int compat_timeval_compare(struct compat_timeval *lhs,
struct compat_timeval *rhs)
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index 8005effc04f..b721129e046 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -16,6 +16,12 @@
#define __must_check __attribute__((warn_unused_result))
#endif
+#ifdef CONFIG_GCOV_KERNEL
+# if __GNUC_MINOR__ < 4
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
/*
* A trick to suppress uninitialized variable warning without generating any
* code
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 09992718f9e..450fa597c94 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -3,8 +3,10 @@
#endif
/* GCC 4.1.[01] miscompiles __weak */
-#if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
-# error Your version of gcc miscompiles the __weak directive
+#ifdef __KERNEL__
+# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
+# error Your version of gcc miscompiles the __weak directive
+# endif
#endif
#define __used __attribute__((__used__))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1..04fb5135b4e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
unsigned long miss;
unsigned long hit;
};
+ unsigned long miss_hit[2];
};
};
@@ -75,7 +76,8 @@ struct ftrace_branch_data {
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
@@ -113,7 +115,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
* "Define 'is'", Bill Clinton
* "Define 'if'", Steven Rostedt
*/
-#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+ if (__builtin_constant_p((cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
@@ -125,10 +129,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
.line = __LINE__, \
}; \
______r = !!(cond); \
- if (______r) \
- ______f.hit++; \
- else \
- ______f.miss++; \
+ ______f.miss_hit[______r]++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
@@ -260,6 +261,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __section(S) __attribute__ ((__section__(#S)))
#endif
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 34f2789d9b9..b9966e64604 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -39,8 +39,10 @@
#define CN_IDX_V86D 0x4
#define CN_VAL_V86D_UVESAFB 0x1
#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
+#define CN_DST_IDX 0x6
+#define CN_DST_VAL 0x1
-#define CN_NETLINK_USERS 6
+#define CN_NETLINK_USERS 7
/*
* Maximum connector's message size.
@@ -109,6 +111,12 @@ struct cn_queue_dev {
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
+ /* Sent to kevent to create cn_queue only when needed */
+ struct work_struct wq_creation;
+ /* Tell if the wq_creation job is pending/completed */
+ atomic_t wq_requested;
+ /* Wait for cn_queue to be created */
+ wait_queue_head_t wq_created;
struct list_head queue_list;
spinlock_t queue_lock;
@@ -164,6 +172,8 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
+
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
diff --git a/include/linux/console.h b/include/linux/console.h
index a67a90cf826..dcca5339ceb 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -137,8 +137,8 @@ extern void resume_console(void);
int mda_console_init(void);
void prom_con_init(void);
-void vcs_make_sysfs(struct tty_struct *tty);
-void vcs_remove_sysfs(struct tty_struct *tty);
+void vcs_make_sysfs(int index);
+void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
#if 1
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c2747ac2ae4..4d668e05d45 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -23,7 +23,6 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
-#include <linux/mutex.h>
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -70,7 +69,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
-extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
@@ -85,10 +83,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
-static inline void cpu_hotplug_init(void)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}
@@ -103,16 +97,6 @@ extern struct sysdev_class cpu_sysdev_class;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
-static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
-{
- mutex_lock(cpu_hp_mutex);
-}
-
-static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
-{
- mutex_unlock(cpu_hp_mutex);
-}
-
extern void get_online_cpus(void);
extern void put_online_cpus(void);
#define hotcpu_notifier(fn, pri) { \
@@ -126,11 +110,6 @@ int cpu_down(unsigned int cpu);
#else /* CONFIG_HOTPLUG_CPU */
-static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
-{ }
-static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
-{ }
-
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9f315382610..c5ac87ca7bc 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t;
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
@@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
return true;
}
+static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
+static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ int node)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
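
A small sketch of the new zeroing allocator; apart from zalloc_cpumask_var() itself, the counting loop is purely illustrative:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_count_online_cpus(void)
{
	cpumask_var_t mask;
	int cpu, n;

	/* Like alloc_cpumask_var(), but the mask starts out cleared. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	for_each_online_cpu(cpu)
		cpumask_set_cpu(cpu, mask);

	n = cpumask_weight(mask);
	free_cpumask_var(mask);
	return n;
}
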
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 90c6074a36c..a5740fc4d04 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,12 +12,12 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
+#include <linux/mm.h>
#ifdef CONFIG_CPUSETS
extern int number_of_cpusets; /* How many cpusets are defined in system? */
-extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
@@ -26,22 +26,31 @@ extern void cpuset_cpus_allowed_locked(struct task_struct *p,
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
-void cpuset_update_task_memory_state(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
-extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
+extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
-static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
return number_of_cpusets <= 1 ||
- __cpuset_zone_allowed_softwall(z, gfp_mask);
+ __cpuset_node_allowed_softwall(node, gfp_mask);
}
-static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
return number_of_cpusets <= 1 ||
- __cpuset_zone_allowed_hardwall(z, gfp_mask);
+ __cpuset_node_allowed_hardwall(node, gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+{
+ return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
+}
+
+static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+{
+ return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
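
A hedged sketch of how an allocation path might use the new node-based check instead of the old zone-based internals; the fallback-to-local-node policy is an illustrative assumption, not something this patch prescribes:

#include <linux/cpuset.h>
#include <linux/topology.h>

/* Illustrative: pick the first high-memory node the current cpuset allows. */
static int example_pick_allowed_node(gfp_t gfp_mask)
{
	int node;

	for_each_node_state(node, N_HIGH_MEMORY)
		if (cpuset_node_allowed_softwall(node, gfp_mask))
			return node;

	return numa_node_id();		/* fall back to the local node */
}
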
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -81,21 +90,25 @@ extern void rebuild_sched_domains(void);
extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+ current->mems_allowed = nodemask;
+}
+
#else /* !CONFIG_CPUSETS */
-static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- *mask = cpu_possible_map;
+ cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
struct cpumask *mask)
{
- *mask = cpu_possible_map;
+ cpumask_copy(mask, cpu_possible_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -105,13 +118,22 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
-static inline void cpuset_update_task_memory_state(void) {}
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
return 1;
}
+static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+{
+ return 1;
+}
+
+static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+{
+ return 1;
+}
+
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
return 1;
@@ -167,6 +189,10 @@ static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+}
+
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 3be4e5a27d8..6fc2bed368b 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -2,9 +2,8 @@
#define __CRAMFS_H
#include <linux/types.h>
+#include <linux/magic.h>
-#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
-#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */
#define CRAMFS_SIGNATURE "Compressed ROMFS"
/*
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 3282ee4318e..4fa99969631 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -13,6 +13,7 @@
#define _LINUX_CRED_H
#include <linux/capability.h>
+#include <linux/init.h>
#include <linux/key.h>
#include <asm/atomic.h>
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 1f2e9020acc..ec29fa268b9 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -40,6 +40,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -548,9 +549,6 @@ struct crypto_attr_u32 {
* Transform user interface.
*/
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend,
- u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index d06fbf28634..1fbdea4f08e 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -82,9 +82,9 @@ struct cyclades_monitor {
* open)
*/
struct cyclades_idle_stats {
- time_t in_use; /* Time device has been in use (secs) */
- time_t recv_idle; /* Time since last char received (secs) */
- time_t xmit_idle; /* Time since last char transmitted (secs) */
+ __kernel_time_t in_use; /* Time device has been in use (secs) */
+ __kernel_time_t recv_idle; /* Time since last char received (secs) */
+ __kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */
unsigned long recv_bytes; /* Bytes received */
unsigned long xmit_bytes; /* Bytes transmitted */
unsigned long overruns; /* Input overruns */
@@ -142,19 +142,6 @@ struct CYZ_BOOT_CTRL {
#ifndef DP_WINDOW_SIZE
-/* #include "cyclomz.h" */
-/****************** ****************** *******************/
-/*
- * The data types defined below are used in all ZFIRM interface
- * data structures. They accomodate differences between HW
- * architectures and compilers.
- */
-
-typedef __u64 ucdouble; /* 64 bits, unsigned */
-typedef __u32 uclong; /* 32 bits, unsigned */
-typedef __u16 ucshort; /* 16 bits, unsigned */
-typedef __u8 ucchar; /* 8 bits, unsigned */
-
/*
* Memory Window Sizes
*/
@@ -507,16 +494,20 @@ struct ZFW_CTRL {
/* Per card data structure */
struct cyclades_card {
- void __iomem *base_addr;
- void __iomem *ctl_addr;
- int irq;
- unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- unsigned int first_line; /* minor number of first channel on card */
- unsigned int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- spinlock_t card_lock;
- struct cyclades_port *ports;
+ void __iomem *base_addr;
+ union {
+ void __iomem *p9050;
+ struct RUNTIME_9060 __iomem *p9060;
+ } ctl_addr;
+ int irq;
+ unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
+ unsigned int first_line; /* minor number of first channel on card */
+ unsigned int nports; /* Number of ports in the card */
+ int bus_index; /* address shift - 0 for ISA, 1 for PCI */
+ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
+ u32 hw_ver;
+ spinlock_t card_lock;
+ struct cyclades_port *ports;
};
/***************************************
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c66d22487bf..30b93b2a01a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -112,7 +112,7 @@ struct dentry {
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
unsigned long d_time; /* used by d_revalidate */
- struct dentry_operations *d_op;
+ const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
void *d_fsdata; /* fs-specific data */
@@ -180,10 +180,12 @@ d_iput: no no no yes
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
#define DCACHE_UNHASHED 0x0010
-#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */
+#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */
#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
+
extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;
@@ -351,6 +353,11 @@ static inline int d_unhashed(struct dentry *dentry)
return (dentry->d_flags & DCACHE_UNHASHED);
}
+static inline int d_unlinked(struct dentry *dentry)
+{
+ return d_unhashed(dentry) && !IS_ROOT(dentry);
+}
+
static inline struct dentry *dget_parent(struct dentry *dentry)
{
struct dentry *ret;
@@ -368,7 +375,7 @@ static inline int d_mountpoint(struct dentry *dentry)
return dentry->d_mounted;
}
-extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *);
+extern struct vfsmount *lookup_mnt(struct path *);
extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
extern int sysctl_vfs_cache_pressure;
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61734e27abb..7434a8353e2 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
return __dccp_hdr_len(dccp_hdr(skb));
}
-
-/* initial values for each feature */
-#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
-#define DCCPF_INITIAL_ACK_RATIO 2
-#define DCCPF_INITIAL_CCID DCCPC_CCID2
-/* FIXME: for now we're default to 1 but it should really be 0 */
-#define DCCPF_INITIAL_SEND_NDP_COUNT 1
-
-/**
- * struct dccp_minisock - Minimal DCCP connection representation
- *
- * Will be used to pass the state from dccp_request_sock to dccp_sock.
- *
- * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
- * @dccpms_pending - List of features being negotiated
- * @dccpms_conf -
- */
-struct dccp_minisock {
- __u64 dccpms_sequence_window;
- struct list_head dccpms_pending;
- struct list_head dccpms_conf;
-};
-
-struct dccp_opt_conf {
- __u8 *dccpoc_val;
- __u8 dccpoc_len;
-};
-
-struct dccp_opt_pend {
- struct list_head dccpop_node;
- __u8 dccpop_type;
- __u8 dccpop_feat;
- __u8 *dccpop_val;
- __u8 dccpop_len;
- int dccpop_conf;
- struct dccp_opt_conf *dccpop_sc;
-};
-
-extern void dccp_minisock_init(struct dccp_minisock *dmsk);
-
/**
* struct dccp_request_sock - represent DCCP-specific connection request
* @dreq_inet_rsk: structure inherited from
@@ -483,13 +443,14 @@ struct dccp_ackvec;
* @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
* @dccps_l_ack_ratio - feature-local Ack Ratio
* @dccps_r_ack_ratio - feature-remote Ack Ratio
+ * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
+ * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
* @dccps_pcslen - sender partial checksum coverage (via sockopt)
* @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
* @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
* @dccps_ndp_count - number of Non Data Packets since last data packet
* @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
* @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
- * @dccps_minisock - associated minisock (accessed via dccp_msk)
* @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
* @dccps_hc_rx_ackvec - rx half connection ack vector
* @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
@@ -523,12 +484,13 @@ struct dccp_sock {
__u32 dccps_timestamp_time;
__u16 dccps_l_ack_ratio;
__u16 dccps_r_ack_ratio;
+ __u64 dccps_l_seq_win:48;
+ __u64 dccps_r_seq_win:48;
__u8 dccps_pcslen:4;
__u8 dccps_pcrlen:4;
__u8 dccps_send_ndp_count:1;
__u64 dccps_ndp_count:48;
unsigned long dccps_rate_last;
- struct dccp_minisock dccps_minisock;
struct list_head dccps_featneg;
struct dccp_ackvec *dccps_hc_rx_ackvec;
struct ccid *dccps_hc_rx_ccid;
@@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
return (struct dccp_sock *)sk;
}
-static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
-{
- return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
-}
-
static inline const char *dccp_role(const struct sock *sk)
{
switch (dccp_sk(sk)->dccps_role) {
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 096476f1fb3..29b3ce3f2a1 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -2,12 +2,20 @@
#define __LINUX_DEBUG_LOCKING_H
#include <linux/kernel.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
struct task_struct;
extern int debug_locks;
extern int debug_locks_silent;
+
+static inline int __debug_locks_off(void)
+{
+ return xchg(&debug_locks, 0);
+}
+
/*
* Generic 'turn off all lock debugging' function:
*/
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index af0e01d4c66..eb5c2ba2f81 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
struct dentry *debugfs_create_blob(const char *name, mode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
+
+bool debugfs_initialized(void);
+
#else
#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline bool debugfs_initialized(void)
+{
+ return false;
+}
+
#endif
#endif
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 00000000000..115272137a9
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_BUNZIP2_H
+#define DECOMPRESS_BUNZIP2_H
+
+int bunzip2(unsigned char *inbuf, int len,
+ int(*fill)(void*, unsigned int),
+ int(*flush)(void*, unsigned int),
+ unsigned char *output,
+ int *pos,
+ void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 00000000000..6dfb856327b
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,33 @@
+#ifndef DECOMPRESS_GENERIC_H
+#define DECOMPRESS_GENERIC_H
+
+/* Minimal chunksize to be read.
+ *Bzip2 prefers at least 4096
+ *Lzma prefers 0x10000 */
+#define COMPR_IOBUF_SIZE 4096
+
+typedef int (*decompress_fn) (unsigned char *inbuf, int len,
+ int(*fill)(void*, unsigned int),
+ int(*writebb)(void*, unsigned int),
+ unsigned char *output,
+ int *posp,
+ void(*error)(char *x));
+
+/* inbuf - input buffer
+ *len - len of pre-read data in inbuf
+ *fill - function to fill inbuf if empty
+ *writebb - function to write out the output buffer
+ *posp - if non-null, input position (number of bytes read) will be
+ * returned here
+ *
+ *If len != 0, the inbuf is already initialized (with that much data) and
+ *fill should not be called.
+ *If len = 0, the inbuf is allocated but empty; its size is COMPR_IOBUF_SIZE.
+ *fill should be called (repeatedly...) to read data, at most COMPR_IOBUF_SIZE
+ *bytes at a time.
+ */
+
+/* Utility routine to detect the decompression method */
+decompress_fn decompress_method(const unsigned char *inbuf, int len,
+ const char **name);
+
+#endif
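
A minimal sketch of the intended call sequence: detect the compression format from the first bytes, then invoke the returned decompressor. The buffer handling (everything pre-read, output written in place) and the error callback are illustrative assumptions:

#include <linux/decompress/generic.h>
#include <linux/kernel.h>

static void example_error(char *msg)
{
	pr_err("decompress: %s\n", msg);
}

/* Decompress an in-memory image that was fully pre-read into inbuf. */
static int example_decompress(unsigned char *inbuf, int len,
			      unsigned char *outbuf)
{
	const char *name;
	decompress_fn decomp;

	decomp = decompress_method(inbuf, len, &name);
	if (!decomp)
		return -EINVAL;		/* unknown or unsupported format */

	pr_info("detected %s compressed image\n", name);

	/* len != 0 and fill == NULL: all input is already in inbuf. */
	return decomp(inbuf, len, NULL, NULL, outbuf, NULL, example_error);
}
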
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 00000000000..f9b06ccc3e5
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,13 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
+/* Other housekeeping constants */
+#define INBUFSIZ 4096
+
+int gunzip(unsigned char *inbuf, int len,
+ int(*fill)(void*, unsigned int),
+ int(*flush)(void*, unsigned int),
+ unsigned char *output,
+ int *pos,
+ void(*error_fn)(char *x));
+#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 00000000000..12ff8c3f1d0
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,87 @@
+/*
+ * linux/compr_mm.h
+ *
+ * Memory management for pre-boot and ramdisk uncompressors
+ *
+ * Authors: Alain Knaff <alain@knaff.lu>
+ *
+ */
+
+#ifndef DECOMPR_MM_H
+#define DECOMPR_MM_H
+
+#ifdef STATIC
+
+/* Code active when included from pre-boot environment: */
+
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size < 0)
+ error("Malloc error");
+ if (!malloc_ptr)
+ malloc_ptr = free_mem_ptr;
+
+ malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+
+ p = (void *)malloc_ptr;
+ malloc_ptr += size;
+
+ if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ malloc_count++;
+ return p;
+}
+
+static void free(void *where)
+{
+ malloc_count--;
+ if (!malloc_count)
+ malloc_ptr = free_mem_ptr;
+}
+
+#define large_malloc(a) malloc(a)
+#define large_free(a) free(a)
+
+#define set_error_fn(x)
+
+#define INIT
+
+#else /* STATIC */
+
+/* Code active when compiled standalone for use when loading ramdisk: */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Use defines rather than static inline in order to avoid spurious
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate). */
+
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+
+#define large_malloc(a) vmalloc(a)
+#define large_free(a) vfree(a)
+
+static void(*error)(char *m);
+#define set_error_fn(x) error = x;
+
+#define INIT __init
+#define STATIC
+
+#include <linux/init.h>
+
+#endif /* STATIC */
+
+#endif /* DECOMPR_MM_H */
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 00000000000..7796538f1bf
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@
+#ifndef DECOMPRESS_UNLZMA_H
+#define DECOMPRESS_UNLZMA_H
+
+int unlzma(unsigned char *, int,
+ int(*fill)(void*, unsigned int),
+ int(*flush)(void*, unsigned int),
+ unsigned char *output,
+ int *posp,
+ void(*error)(char *x)
+ );
+
+#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8209e08969f..49c2362977f 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -116,7 +116,6 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
* Target features
*/
-#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001
struct target_type {
uint64_t features;
@@ -139,6 +138,9 @@ struct target_type {
dm_ioctl_fn ioctl;
dm_merge_fn merge;
dm_busy_fn busy;
+
+ /* For internal device-mapper use. */
+ struct list_head list;
};
struct io_restrictions {
@@ -147,7 +149,7 @@ struct io_restrictions {
unsigned max_hw_sectors;
unsigned max_sectors;
unsigned max_segment_size;
- unsigned short hardsect_size;
+ unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/device.h b/include/linux/device.h
index 47f343c7bdd..ed4e39f2c42 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -28,6 +28,7 @@
#define BUS_ID_SIZE 20
struct device;
+struct device_private;
struct device_driver;
struct driver_private;
struct class;
@@ -61,8 +62,6 @@ struct bus_type {
void (*shutdown)(struct device *dev);
int (*suspend)(struct device *dev, pm_message_t state);
- int (*suspend_late)(struct device *dev, pm_message_t state);
- int (*resume_early)(struct device *dev);
int (*resume)(struct device *dev);
struct dev_pm_ops *pm;
@@ -115,6 +114,8 @@ extern int bus_unregister_notifier(struct bus_type *bus,
#define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */
#define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be
unbound */
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000005 /* driver is unbound
+ from the device */
extern struct kset *bus_get_kset(struct bus_type *bus);
extern struct klist *bus_get_device_klist(struct bus_type *bus);
@@ -147,7 +148,7 @@ extern void put_driver(struct device_driver *drv);
extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
-extern int wait_for_device_probe(void);
+extern void wait_for_device_probe(void);
/* sysfs interface for exporting driver attributes */
@@ -193,6 +194,7 @@ struct class {
struct kobject *dev_kobj;
int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+ char *(*nodename)(struct device *dev);
void (*class_release)(struct class *class);
void (*dev_release)(struct device *dev);
@@ -288,11 +290,9 @@ struct device_type {
const char *name;
struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ char *(*nodename)(struct device *dev);
void (*release)(struct device *dev);
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-
struct dev_pm_ops *pm;
};
@@ -367,15 +367,11 @@ struct device_dma_parameters {
};
struct device {
- struct klist klist_children;
- struct klist_node knode_parent; /* node in sibling list */
- struct klist_node knode_driver;
- struct klist_node knode_bus;
struct device *parent;
+ struct device_private *p;
+
struct kobject kobj;
- char bus_id[BUS_ID_SIZE]; /* position on parent bus */
- unsigned uevent_suppress:1;
const char *init_name; /* initial name of the device */
struct device_type *type;
@@ -427,8 +423,7 @@ struct device {
static inline const char *dev_name(const struct device *dev)
{
- /* will be changed into kobject_name(&dev->kobj) in the near future */
- return dev->bus_id;
+ return kobject_name(&dev->kobj);
}
extern int dev_set_name(struct device *dev, const char *name, ...)
@@ -463,6 +458,16 @@ static inline void dev_set_drvdata(struct device *dev, void *data)
dev->driver_data = data;
}
+static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
+{
+ return dev->kobj.uevent_suppress;
+}
+
+static inline void dev_set_uevent_suppress(struct device *dev, int val)
+{
+ dev->kobj.uevent_suppress = val;
+}
+
static inline int device_is_registered(struct device *dev)
{
return dev->kobj.state_in_sysfs;
@@ -483,7 +488,9 @@ extern int device_for_each_child(struct device *dev, void *data,
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, char *new_name);
-extern int device_move(struct device *dev, struct device *new_parent);
+extern int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order);
+extern const char *device_get_nodename(struct device *dev, const char **tmp);
/*
* Root device objects for grouping under /sys/devices
@@ -539,6 +546,7 @@ extern int (*platform_notify_remove)(struct device *dev);
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
+extern void wait_for_device_probe(void);
/* drivers/base/power/shutdown.c */
extern void device_shutdown(void);
@@ -570,7 +578,7 @@ extern const char *dev_driver_string(const struct device *dev);
#if defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG , dev , format , ## arg)
-#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index b9cd38603fd..0b3518c4235 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -81,8 +81,8 @@ struct dlm_lksb {
* the cluster, the calling node joins it.
*/
-int dlm_new_lockspace(char *name, int namelen, dlm_lockspace_t **lockspace,
- uint32_t flags, int lvblen);
+int dlm_new_lockspace(const char *name, int namelen,
+ dlm_lockspace_t **lockspace, uint32_t flags, int lvblen);
/*
* dlm_release_lockspace
diff --git a/include/linux/dlm_netlink.h b/include/linux/dlm_netlink.h
index 19276332707..647c8ef2722 100644
--- a/include/linux/dlm_netlink.h
+++ b/include/linux/dlm_netlink.h
@@ -9,6 +9,8 @@
#ifndef _DLM_NETLINK_H
#define _DLM_NETLINK_H
+#include <linux/types.h>
+
enum {
DLM_STATUS_WAITING = 1,
DLM_STATUS_GRANTED = 2,
@@ -18,16 +20,16 @@ enum {
#define DLM_LOCK_DATA_VERSION 1
struct dlm_lock_data {
- uint16_t version;
- uint32_t lockspace_id;
+ __u16 version;
+ __u32 lockspace_id;
int nodeid;
int ownpid;
- uint32_t id;
- uint32_t remid;
- uint64_t xid;
- int8_t status;
- int8_t grmode;
- int8_t rqmode;
+ __u32 id;
+ __u32 remid;
+ __u64 xid;
+ __s8 status;
+ __s8 grmode;
+ __s8 rqmode;
unsigned long timestamp;
int resource_namelen;
char resource_name[DLM_RESNAME_MAXLEN];
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 600c5fb2daa..5e8b11d88f6 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
const char *name;
struct module *module;
+ /* For internal device-mapper use */
+ struct list_head list;
+
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
unsigned argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
char *result, unsigned maxlen);
+
+ /*
+ * is_remote_recovering is necessary for cluster mirroring. It provides
+ * a way to detect recovery on another node, so we aren't writing
+ * concurrently. This function is likely to block (when a cluster log
+ * is used).
+ *
+ * Returns: 0, 1
+ */
+ int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
};
int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 28c2940eb30..48e44ee2b46 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -113,20 +113,20 @@ struct dm_ioctl {
* return -ENOTTY) fill out this field, even if the
* command failed.
*/
- uint32_t version[3]; /* in/out */
- uint32_t data_size; /* total size of data passed in
+ __u32 version[3]; /* in/out */
+ __u32 data_size; /* total size of data passed in
* including this struct */
- uint32_t data_start; /* offset to start of data
+ __u32 data_start; /* offset to start of data
* relative to start of this struct */
- uint32_t target_count; /* in/out */
- int32_t open_count; /* out */
- uint32_t flags; /* in/out */
- uint32_t event_nr; /* in/out */
- uint32_t padding;
+ __u32 target_count; /* in/out */
+ __s32 open_count; /* out */
+ __u32 flags; /* in/out */
+ __u32 event_nr; /* in/out */
+ __u32 padding;
- uint64_t dev; /* in/out */
+ __u64 dev; /* in/out */
char name[DM_NAME_LEN]; /* device name */
char uuid[DM_UUID_LEN]; /* unique identifier for
@@ -139,9 +139,9 @@ struct dm_ioctl {
* dm_ioctl.
*/
struct dm_target_spec {
- uint64_t sector_start;
- uint64_t length;
- int32_t status; /* used when reading from kernel only */
+ __u64 sector_start;
+ __u64 length;
+ __s32 status; /* used when reading from kernel only */
/*
* Location of the next dm_target_spec.
@@ -153,7 +153,7 @@ struct dm_target_spec {
* (that follows the dm_ioctl struct) to the start of the "next"
* dm_target_spec.
*/
- uint32_t next;
+ __u32 next;
char target_type[DM_MAX_TYPE_NAME];
@@ -168,17 +168,17 @@ struct dm_target_spec {
* Used to retrieve the target dependencies.
*/
struct dm_target_deps {
- uint32_t count; /* Array size */
- uint32_t padding; /* unused */
- uint64_t dev[0]; /* out */
+ __u32 count; /* Array size */
+ __u32 padding; /* unused */
+ __u64 dev[0]; /* out */
};
/*
* Used to get a list of all dm devices.
*/
struct dm_name_list {
- uint64_t dev;
- uint32_t next; /* offset to the next record from
+ __u64 dev;
+ __u32 next; /* offset to the next record from
the _start_ of this */
char name[0];
};
@@ -187,8 +187,8 @@ struct dm_name_list {
* Used to retrieve the target versions
*/
struct dm_target_versions {
- uint32_t next;
- uint32_t version[3];
+ __u32 next;
+ __u32 version[3];
char name[0];
};
@@ -197,7 +197,7 @@ struct dm_target_versions {
* Used to pass message to a target
*/
struct dm_target_msg {
- uint64_t sector; /* Device sector */
+ __u64 sector; /* Device sector */
char message[0];
};
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
new file mode 100644
index 00000000000..171ad8aedc8
--- /dev/null
+++ b/include/linux/dma-debug.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __DMA_DEBUG_H
+#define __DMA_DEBUG_H
+
+#include <linux/types.h>
+
+struct device;
+struct scatterlist;
+struct bus_type;
+
+#ifdef CONFIG_DMA_API_DEBUG
+
+extern void dma_debug_add_bus(struct bus_type *bus);
+
+extern void dma_debug_init(u32 num_entries);
+
+extern int dma_debug_resize_entries(u32 num_entries);
+
+extern void debug_dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ int direction, dma_addr_t dma_addr,
+ bool map_single);
+
+extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, int direction, bool map_single);
+
+extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int mapped_ents, int direction);
+
+extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, int dir);
+
+extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t dma_addr, void *virt);
+
+extern void debug_dma_free_coherent(struct device *dev, size_t size,
+ void *virt, dma_addr_t addr);
+
+extern void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+
+extern void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction);
+
+extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction);
+
+extern void debug_dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size, int direction);
+
+extern void debug_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction);
+
+extern void debug_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction);
+
+extern void debug_dma_dump_mappings(struct device *dev);
+
+#else /* CONFIG_DMA_API_DEBUG */
+
+static inline void dma_debug_add_bus(struct bus_type *bus)
+{
+}
+
+static inline void dma_debug_init(u32 num_entries)
+{
+}
+
+static inline int dma_debug_resize_entries(u32 num_entries)
+{
+ return 0;
+}
+
+static inline void debug_dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ int direction, dma_addr_t dma_addr,
+ bool map_single)
+{
+}
+
+static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, int direction,
+ bool map_single)
+{
+}
+
+static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int mapped_ents, int direction)
+{
+}
+
+static inline void debug_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems, int dir)
+{
+}
+
+static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t dma_addr, void *virt)
+{
+}
+
+static inline void debug_dma_free_coherent(struct device *dev, size_t size,
+ void *virt, dma_addr_t addr)
+{
+}
+
+static inline void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+}
+
+static inline void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+}
+
+static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction)
+{
+}
+
+static inline void debug_dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction)
+{
+}
+
+static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+}
+
+static inline void debug_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+}
+
+static inline void debug_dma_dump_mappings(struct device *dev)
+{
+}
+
+#endif /* CONFIG_DMA_API_DEBUG */
+
+#endif /* __DMA_DEBUG_H */
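For context (not part of the patch): these hooks are meant to be called by an architecture's DMA-mapping implementation around its real work; with CONFIG_DMA_API_DEBUG disabled they compile down to the empty stubs above. A minimal sketch, assuming a direct-mapped architecture and a hypothetical helper name; dma_debug_init() is expected to have been called once at boot with the number of preallocated entries:

/* Hypothetical arch-side helper: perform the real mapping, then record it. */
static dma_addr_t my_arch_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   int direction)
{
	dma_addr_t addr = page_to_phys(page) + offset;	/* assumes a 1:1 mapping */

	debug_dma_map_page(dev, page, offset, size, direction, addr, false);
	return addr;
}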
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ba9114ec5d3..07dfd460d28 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -3,6 +3,8 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/dma-attrs.h>
+#include <linux/scatterlist.h>
/* These definitions mirror those in pci.h, so they can be used
* interchangeably with their PCI_ counterparts */
@@ -13,26 +15,74 @@ enum dma_data_direction {
DMA_NONE = 3,
};
+struct dma_map_ops {
+ void* (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ void (*unmap_sg)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ void (*sync_single_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_single_range_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir);
+ void (*sync_single_range_for_device)(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ int is_phys;
+};
+
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+typedef u64 DMA_nnBIT_MASK __deprecated;
+
/*
* NOTE: do not use the below macros in new code and do not add new definitions
* here.
*
* Instead, just open-code DMA_BIT_MASK(n) within your driver
*/
-#define DMA_64BIT_MASK DMA_BIT_MASK(64)
-#define DMA_48BIT_MASK DMA_BIT_MASK(48)
-#define DMA_47BIT_MASK DMA_BIT_MASK(47)
-#define DMA_40BIT_MASK DMA_BIT_MASK(40)
-#define DMA_39BIT_MASK DMA_BIT_MASK(39)
-#define DMA_35BIT_MASK DMA_BIT_MASK(35)
-#define DMA_32BIT_MASK DMA_BIT_MASK(32)
-#define DMA_31BIT_MASK DMA_BIT_MASK(31)
-#define DMA_30BIT_MASK DMA_BIT_MASK(30)
-#define DMA_29BIT_MASK DMA_BIT_MASK(29)
-#define DMA_28BIT_MASK DMA_BIT_MASK(28)
-#define DMA_24BIT_MASK DMA_BIT_MASK(24)
+#define DMA_64BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(64)
+#define DMA_48BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(48)
+#define DMA_47BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(47)
+#define DMA_40BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(40)
+#define DMA_39BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(39)
+#define DMA_35BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(35)
+#define DMA_32BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(32)
+#define DMA_31BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(31)
+#define DMA_30BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(30)
+#define DMA_29BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(29)
+#define DMA_28BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(28)
+#define DMA_24BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(24)
#define DMA_MASK_NONE 0x0ULL
@@ -59,15 +109,26 @@ static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
#include <asm-generic/dma-mapping-broken.h>
#endif
-/* Backwards compat, remove in 2.7.x */
-#define dma_sync_single dma_sync_single_for_cpu
-#define dma_sync_sg dma_sync_sg_for_cpu
+/* For backwards compatibility; to be removed soon */
+static inline void __deprecated dma_sync_single(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void __deprecated dma_sync_sg(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
static inline u64 dma_get_mask(struct device *dev)
{
if (dev && dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
- return DMA_32BIT_MASK;
+ return DMA_BIT_MASK(32);
}
extern u64 dma_get_required_mask(struct device *dev);
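As the comment above says, new code should open-code DMA_BIT_MASK(n) rather than use the deprecated DMA_nnBIT_MASK casts. A hedged probe-time sketch (driver name hypothetical):

/* Prefer 64-bit DMA addressing, fall back to 32-bit, otherwise give up. */
static int mydrv_setup_dma(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;
	if (dma_set_mask(dev, DMA_BIT_MASK(32)) == 0)
		return 0;
	return -EIO;	/* no usable DMA addressing mode */
}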
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674..1a455f1f86d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
+#define DMA_PTE_SNP (1 << 11)
struct intel_iommu;
struct dmar_domain;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d3..ffefba81c81 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
#include <linux/device.h>
#include <linux/uio.h>
-#include <linux/kref.h>
-#include <linux/completion.h>
-#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>
/**
@@ -81,12 +78,18 @@ enum dma_transaction_type {
* dependency chains
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
* @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
+ * @DMA_COMPL_SRC_UNMAP_SINGLE - set to dma-unmap the source buffer as "single"
+ * (if not set, the source is dma-unmapped as a page)
+ * @DMA_COMPL_DEST_UNMAP_SINGLE - set to dma-unmap the destination as "single"
+ * (if not set, the destination is dma-unmapped as a page)
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
+ DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
+ DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
};
/**
@@ -205,6 +208,7 @@ struct dma_async_tx_descriptor {
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @cap_mask: one or more dma_capability flags
@@ -227,6 +231,7 @@ struct dma_async_tx_descriptor {
struct dma_device {
unsigned int chancnt;
+ unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
dma_cap_mask_t cap_mask;
@@ -291,6 +296,24 @@ static inline void net_dmaengine_put(void)
}
#endif
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get() dmaengine_get()
+#define async_dmaengine_put() dmaengine_put()
+#define async_dma_find_channel(type) dma_find_channel(type)
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+ return NULL;
+}
+#endif
+
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +360,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
set_bit(tx_type, dstp->bits);
}
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ clear_bit(tx_type, dstp->bits);
+}
+
#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{
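For illustration (not part of the patch): the new dma_cap_clear() complements dma_cap_zero()/dma_cap_set() when building the capability mask handed to dma_request_channel(). A sketch; the surrounding driver is hypothetical:

/* Ask only for memcpy-capable channels; a NULL filter accepts any match. */
static struct dma_chan *grab_memcpy_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	dma_cap_clear(DMA_XOR, mask);	/* redundant after dma_cap_zero(); shows the new helper */
	return dma_request_channel(mask, NULL, NULL);
}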
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index f28440784cf..10ff5c49882 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -24,16 +24,17 @@
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
+#include <linux/irqreturn.h>
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct intel_iommu;
-
+#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
struct pci_dev **devices; /* target device array */
int devices_cnt; /* target device count */
+ u16 segment; /* PCI domain */
u8 ignored:1; /* ignore drhd */
u8 include_all:1;
struct intel_iommu *iommu;
@@ -44,12 +45,20 @@ extern struct list_head dmar_drhd_units;
#define for_each_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list)
+#define for_each_active_iommu(i, drhd) \
+ list_for_each_entry(drhd, &dmar_drhd_units, list) \
+ if (i=drhd->iommu, drhd->ignored) {} else
+
+#define for_each_iommu(i, drhd) \
+ list_for_each_entry(drhd, &dmar_drhd_units, list) \
+ if (i=drhd->iommu, 0) {} else
+
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
/* Intel IOMMU detection */
extern void detect_intel_iommu(void);
-
+extern int enable_drhd_fault_handling(void);
extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
@@ -63,12 +72,12 @@ static inline int dmar_table_init(void)
{
return -ENODEV;
}
+static inline int enable_drhd_fault_handling(void)
+{
+ return -1;
+}
#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
-#ifdef CONFIG_INTR_REMAP
-extern int intr_remapping_enabled;
-extern int enable_intr_remapping(int);
-
struct irte {
union {
struct {
@@ -97,6 +106,13 @@ struct irte {
__u64 high;
};
};
+#ifdef CONFIG_INTR_REMAP
+extern int intr_remapping_enabled;
+extern int intr_remapping_supported(void);
+extern int enable_intr_remapping(int);
+extern void disable_intr_remapping(void);
+extern int reenable_intr_remapping(int);
+
extern int get_irte(int irq, struct irte *entry);
extern int modify_irte(int irq, struct irte *irte_modified);
extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
@@ -111,14 +127,42 @@ extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
#else
+static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+{
+ return -1;
+}
+static inline int modify_irte(int irq, struct irte *irte_modified)
+{
+ return -1;
+}
+static inline int free_irte(int irq)
+{
+ return -1;
+}
+static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+{
+ return -1;
+}
+static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
+ u16 sub_handle)
+{
+ return -1;
+}
+static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
+{
+ return NULL;
+}
+static inline struct intel_iommu *map_ioapic_to_ir(int apic)
+{
+ return NULL;
+}
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
+#define disable_intr_remapping() (0)
+#define reenable_intr_remapping(mode) (0)
#define intr_remapping_enabled (0)
#endif
-#ifdef CONFIG_DMAR
-extern const char *dmar_get_fault_reason(u8 fault_reason);
-
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
@@ -127,8 +171,10 @@ extern void dmar_msi_mask(unsigned int irq);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
+extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
+#ifdef CONFIG_DMAR
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_rmrr_unit {
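For illustration: the new iterators replace open-coded loops over dmar_drhd_units; for_each_active_iommu() additionally skips units marked ignored. A minimal sketch:

/* Walk every usable IOMMU; 'iommu' is shorthand for drhd->iommu. */
static void list_active_iommus(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_active_iommu(iommu, drhd)
		printk(KERN_INFO "DMAR: iommu %p on PCI segment %u\n",
		       iommu, drhd->segment);
}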
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index d741b9ceb0e..bb5489c82c9 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -47,7 +47,8 @@ extern int dmi_get_year(int field);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
-extern int dmi_walk(void (*decode)(const struct dmi_header *));
+extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
#else
@@ -61,8 +62,8 @@ static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
-static inline int dmi_walk(void (*decode)(const struct dmi_header *))
- { return -1; }
+static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ void *private_data) { return -1; }
static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline const struct dmi_system_id *
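For illustration: the added private_data argument lets a dmi_walk() caller thread state through its decode callback instead of relying on globals. A hedged sketch:

/* Count DMI records of a given type using the new callback signature. */
struct dmi_type_count {
	u8 type;
	int count;
};

static void dmi_count_cb(const struct dmi_header *dh, void *private_data)
{
	struct dmi_type_count *tc = private_data;

	if (dh->type == tc->type)
		tc->count++;
}

static int dmi_count_type(u8 type)
{
	struct dmi_type_count tc = { .type = type, .count = 0 };

	return dmi_walk(dmi_count_cb, &tc) ? -1 : tc.count;
}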
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 102a902b439..ecc06286226 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -10,7 +10,7 @@
struct dnotify_struct {
struct dnotify_struct * dn_next;
- unsigned long dn_mask;
+ __u32 dn_mask;
int dn_fd;
struct file * dn_filp;
fl_owner_t dn_owner;
@@ -21,23 +21,18 @@ struct dnotify_struct {
#ifdef CONFIG_DNOTIFY
-extern void __inode_dir_notify(struct inode *, unsigned long);
+#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\
+ FS_MODIFY | FS_MODIFY_CHILD |\
+ FS_ACCESS | FS_ACCESS_CHILD |\
+ FS_ATTRIB | FS_ATTRIB_CHILD |\
+ FS_CREATE | FS_DN_RENAME |\
+ FS_MOVED_FROM | FS_MOVED_TO)
+
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
-extern void dnotify_parent(struct dentry *, unsigned long);
-
-static inline void inode_dir_notify(struct inode *inode, unsigned long event)
-{
- if (inode->i_dnotify_mask & (event))
- __inode_dir_notify(inode, event);
-}
#else
-static inline void __inode_dir_notify(struct inode *inode, unsigned long event)
-{
-}
-
static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
@@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
return -EINVAL;
}
-static inline void dnotify_parent(struct dentry *dentry, unsigned long event)
-{
-}
-
-static inline void inode_dir_notify(struct inode *inode, unsigned long event)
-{
-}
-
#endif /* CONFIG_DNOTIFY */
#endif /* __KERNEL__ */
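For background (userspace side, not part of this header): dnotify is still driven through fcntl(); the new DNOTIFY_ALL_EVENTS mask only describes which fsnotify events it can express. A hedged example of the user-facing API:

/* Watch a directory with dnotify; events arrive as the chosen real-time signal. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

int dnotify_watch(const char *path)
{
	int fd = open(path, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	if (fcntl(fd, F_SETSIG, SIGRTMIN) < 0 ||
	    fcntl(fd, F_NOTIFY, DN_CREATE | DN_MODIFY | DN_MULTISHOT) < 0)
		return -1;
	return fd;	/* siginfo.si_fd identifies this watch when the signal fires */
}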
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
deleted file mode 100644
index d3c65e48a2e..00000000000
--- a/include/linux/ds1wm.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* platform data for the DS1WM driver */
-
-struct ds1wm_platform_data {
- int bus_shift; /* number of shifts needed to calculate the
- * offset between DS1WM registers;
- * e.g. on h5xxx and h2200 this is 2
- * (registers aligned to 4-byte boundaries),
- * while on hx4700 this is 1 */
- int active_high;
- void (*enable)(struct platform_device *pdev);
- void (*disable)(struct platform_device *pdev);
-};
diff --git a/include/linux/dst.h b/include/linux/dst.h
new file mode 100644
index 00000000000..e26fed84b1a
--- /dev/null
+++ b/include/linux/dst.h
@@ -0,0 +1,587 @@
+/*
+ * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DST_H
+#define __DST_H
+
+#include <linux/types.h>
+#include <linux/connector.h>
+
+#define DST_NAMELEN 32
+#define DST_NAME "dst"
+
+enum {
+ /* Remove node with given id from storage */
+ DST_DEL_NODE = 0,
+ /* Add remote node with given id to the storage */
+ DST_ADD_REMOTE,
+ /* Add local node with given id to the storage to be exported and used by remote peers */
+ DST_ADD_EXPORT,
+ /* Crypto initialization command (hash/cipher used to protect the connection) */
+ DST_CRYPTO,
+ /* Security attributes for given connection (permissions for example) */
+ DST_SECURITY,
+ /* Register given node in the block layer subsystem */
+ DST_START,
+ DST_CMD_MAX
+};
+
+struct dst_ctl
+{
+ /* Storage name */
+ char name[DST_NAMELEN];
+ /* Command flags */
+ __u32 flags;
+ /* Command itself (see above) */
+ __u32 cmd;
+ /* Maximum number of pages per single request in this device */
+ __u32 max_pages;
+ /* Stale/error transaction scanning timeout in milliseconds */
+ __u32 trans_scan_timeout;
+ /* Maximum number of retry sends before completing transaction as broken */
+ __u32 trans_max_retries;
+ /* Storage size */
+ __u64 size;
+};
+
+/* Reply command carries completion status */
+struct dst_ctl_ack
+{
+ struct cn_msg msg;
+ int error;
+ int unused[3];
+};
+
+/*
+ * Unfortunately the socket address structure is not exported to userspace,
+ * so it is redefined here.
+ */
+#define SADDR_MAX_DATA 128
+
+struct saddr {
+ /* address family, AF_xxx */
+ unsigned short sa_family;
+ /* protocol address (up to SADDR_MAX_DATA bytes) */
+ char sa_data[SADDR_MAX_DATA];
+ /* Number of bytes used in sa_data */
+ unsigned short sa_data_len;
+};
+
+/* Address structure */
+struct dst_network_ctl
+{
+ /* Socket type: datagram, stream...*/
+ unsigned int type;
+ /* Let me guess, is it a Jupiter diameter? */
+ unsigned int proto;
+ /* Peer's address */
+ struct saddr addr;
+};
+
+struct dst_crypto_ctl
+{
+ /* Cipher and hash names */
+ char cipher_algo[DST_NAMELEN];
+ char hash_algo[DST_NAMELEN];
+
+ /* Key sizes. Can be zero for digest for example */
+ unsigned int cipher_keysize, hash_keysize;
+ /* Alignment. Calculated by the DST itself. */
+ unsigned int crypto_attached_size;
+ /* Number of threads to perform crypto operations */
+ int thread_num;
+};
+
+/* Export security attributes have these bits checked when a client connects */
+#define DST_PERM_READ (1<<0)
+#define DST_PERM_WRITE (1<<1)
+
+/*
+ * Right now it is a simple model, where each remote address
+ * is assigned the set of permissions it is allowed to perform.
+ * In the real world a block device does not know anything but
+ * reading and writing, so this should be more than enough.
+ */
+struct dst_secure_user
+{
+ unsigned int permissions;
+ struct saddr addr;
+};
+
+/*
+ * Export control command: the device to export and the network address
+ * on which to accept clients for that device
+ */
+struct dst_export_ctl
+{
+ char device[DST_NAMELEN];
+ struct dst_network_ctl ctl;
+};
+
+enum {
+ DST_CFG = 1, /* Request remote configuration */
+ DST_IO, /* IO command */
+ DST_IO_RESPONSE, /* IO response */
+ DST_PING, /* Keepalive message */
+ DST_NCMD_MAX,
+};
+
+struct dst_cmd
+{
+ /* Network command itself, see above */
+ __u32 cmd;
+ /*
+ * Size of the attached data
+ * (in most cases, for READ command it means how many bytes were requested)
+ */
+ __u32 size;
+ /* Crypto size: number of attached bytes with digest/hmac */
+ __u32 csize;
+ /* Here we can carry secret data */
+ __u32 reserved;
+ /* Read/write bits, see how they are encoded in bio structure */
+ __u64 rw;
+ /* BIO flags */
+ __u64 flags;
+ /* Unique command id (like transaction ID) */
+ __u64 id;
+ /* Sector to start IO from */
+ __u64 sector;
+ /* Hash data is placed after this header */
+ __u8 hash[0];
+};
+
+/*
+ * Convert command to/from network byte order.
+ * We do not use hton*() functions, since there is
+ * no 64-bit implementation.
+ */
+static inline void dst_convert_cmd(struct dst_cmd *c)
+{
+ c->cmd = __cpu_to_be32(c->cmd);
+ c->csize = __cpu_to_be32(c->csize);
+ c->size = __cpu_to_be32(c->size);
+ c->sector = __cpu_to_be64(c->sector);
+ c->id = __cpu_to_be64(c->id);
+ c->flags = __cpu_to_be64(c->flags);
+ c->rw = __cpu_to_be64(c->rw);
+}
+
+/* Transaction id */
+typedef __u64 dst_gen_t;
+
+#ifdef __KERNEL__
+
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/device.h>
+#include <linux/mempool.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/rbtree.h>
+
+#ifdef CONFIG_DST_DEBUG
+#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
+#else
+static inline void __attribute__ ((format (printf, 1, 2)))
+ dprintk(const char *fmt, ...) {}
+#endif
+
+struct dst_node;
+
+struct dst_trans
+{
+ /* DST node we are working with */
+ struct dst_node *n;
+
+ /* Entry inside transaction tree */
+ struct rb_node trans_entry;
+
+ /* Merlin kills this transaction when this memory cell equals zero */
+ atomic_t refcnt;
+
+ /* How this transaction should be processed by crypto engine */
+ short enc;
+ /* How many times this transaction was resent */
+ short retries;
+ /* Completion status */
+ int error;
+
+ /* When did we send it to the remote peer */
+ long send_time;
+
+ /* My name is...
+ * Well, computers do not speak; they have a unique id instead */
+ dst_gen_t gen;
+
+ /* Block IO we are working with */
+ struct bio *bio;
+
+ /* Network command for above block IO request */
+ struct dst_cmd cmd;
+};
+
+struct dst_crypto_engine
+{
+ /* What should we do with all block requests */
+ struct crypto_hash *hash;
+ struct crypto_ablkcipher *cipher;
+
+ /* Pool of pages used to encrypt data into before sending */
+ int page_num;
+ struct page **pages;
+
+ /* What to do with current request */
+ int enc;
+ /* Who we are and where do we go */
+ struct scatterlist *src, *dst;
+
+ /* Maximum timeout waiting for encryption to be completed */
+ long timeout;
+ /* IV is a 64-bit sequential counter */
+ u64 iv;
+
+ /* Secret data */
+ void *private;
+
+ /* Cached temporary data lives here */
+ int size;
+ void *data;
+};
+
+struct dst_state
+{
+ /* The main state protection */
+ struct mutex state_lock;
+
+ /* Polling machinery for sockets */
+ wait_queue_t wait;
+ wait_queue_head_t *whead;
+ /* Most events are waited on here */
+ wait_queue_head_t thread_wait;
+
+ /* Who owns this? */
+ struct dst_node *node;
+
+ /* Network address for this state */
+ struct dst_network_ctl ctl;
+
+ /* Permissions to work with: read-only or rw connection */
+ u32 permissions;
+
+ /* Called when we need to clean private data */
+ void (* cleanup)(struct dst_state *st);
+
+ /* Used by the server: BIO completion queues BIOs here */
+ struct list_head request_list;
+ spinlock_t request_lock;
+
+ /* Guess what? No, it is not number of planets */
+ atomic_t refcnt;
+
+ /* This flag is set when the connection should be dropped */
+ int need_exit;
+
+ /*
+ * Socket to work with. Second pointer is used for
+ * lockless check if socket was changed before performing
+ * next action (like working with cached polling result)
+ */
+ struct socket *socket, *read_socket;
+
+ /* Cached preallocated data */
+ void *data;
+ unsigned int size;
+
+ /* Currently processed command */
+ struct dst_cmd cmd;
+};
+
+struct dst_info
+{
+ /* Device size */
+ u64 size;
+
+ /* Local device name for export devices */
+ char local[DST_NAMELEN];
+
+ /* Network setup */
+ struct dst_network_ctl net;
+
+ /* Sysfs bits use this */
+ struct device device;
+};
+
+struct dst_node
+{
+ struct list_head node_entry;
+
+ /* Hi, my name is stored here */
+ char name[DST_NAMELEN];
+ /* My cache name is stored here */
+ char cache_name[DST_NAMELEN];
+
+ /* Block device attached to given node.
+ * Only valid for exporting nodes */
+ struct block_device *bdev;
+ /* Network state machine for given peer */
+ struct dst_state *state;
+
+ /* Block IO machinery */
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ /* Number of threads in processing pool */
+ int thread_num;
+ /* Maximum number of pages in single IO */
+ int max_pages;
+
+ /* I'm that big in bytes */
+ loff_t size;
+
+ /* Exported to userspace node information */
+ struct dst_info *info;
+
+ /*
+ * Security attribute list.
+ * Used only by exporting node currently.
+ */
+ struct list_head security_list;
+ struct mutex security_lock;
+
+ /*
+ * When this underflows below zero, the universe collapses.
+ * But this will not happen, since the node is freed
+ * when the reference counter reaches zero.
+ */
+ atomic_t refcnt;
+
+ /* How precisely should I be started? */
+ int (*start)(struct dst_node *);
+
+ /* Crypto capabilities */
+ struct dst_crypto_ctl crypto;
+ u8 *hash_key;
+ u8 *cipher_key;
+
+ /* Pool of processing threads */
+ struct thread_pool *pool;
+
+ /* Transaction IDs live here */
+ atomic_long_t gen;
+
+ /*
+ * How frequently and how many times transaction
+ * tree should be scanned to drop stale objects.
+ */
+ long trans_scan_timeout;
+ int trans_max_retries;
+
+ /* Small gnomes live here */
+ struct rb_root trans_root;
+ struct mutex trans_lock;
+
+ /*
+ * Transaction cache/memory pool.
+ * It is big enough to contain not only the transaction
+ * itself, but also the additional crypto data (digest/hmac).
+ */
+ struct kmem_cache *trans_cache;
+ mempool_t *trans_pool;
+
+ /* This entity scans transaction tree */
+ struct delayed_work trans_work;
+
+ wait_queue_head_t wait;
+};
+
+/* Kernel representation of the security attribute */
+struct dst_secure
+{
+ struct list_head sec_entry;
+ struct dst_secure_user sec;
+};
+
+int dst_process_bio(struct dst_node *n, struct bio *bio);
+
+int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
+int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
+
+static inline struct dst_state *dst_state_get(struct dst_state *st)
+{
+ BUG_ON(atomic_read(&st->refcnt) == 0);
+ atomic_inc(&st->refcnt);
+ return st;
+}
+
+void dst_state_put(struct dst_state *st);
+
+struct dst_state *dst_state_alloc(struct dst_node *n);
+int dst_state_socket_create(struct dst_state *st);
+void dst_state_socket_release(struct dst_state *st);
+
+void dst_state_exit_connected(struct dst_state *st);
+
+int dst_state_schedule_receiver(struct dst_state *st);
+
+void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
+
+static inline void dst_state_lock(struct dst_state *st)
+{
+ mutex_lock(&st->state_lock);
+}
+
+static inline void dst_state_unlock(struct dst_state *st)
+{
+ mutex_unlock(&st->state_lock);
+}
+
+void dst_poll_exit(struct dst_state *st);
+int dst_poll_init(struct dst_state *st);
+
+static inline unsigned int dst_state_poll(struct dst_state *st)
+{
+ unsigned int revents = POLLHUP | POLLERR;
+
+ dst_state_lock(st);
+ if (st->socket)
+ revents = st->socket->ops->poll(NULL, st->socket, NULL);
+ dst_state_unlock(st);
+
+ return revents;
+}
+
+static inline int dst_thread_setup(void *private, void *data)
+{
+ return 0;
+}
+
+void dst_node_put(struct dst_node *n);
+
+static inline struct dst_node *dst_node_get(struct dst_node *n)
+{
+ atomic_inc(&n->refcnt);
+ return n;
+}
+
+int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
+int dst_recv_cdata(struct dst_state *st, void *cdata);
+int dst_data_send_header(struct socket *sock,
+ void *data, unsigned int size, int more);
+
+int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
+
+int dst_process_io(struct dst_state *st);
+int dst_export_crypto(struct dst_node *n, struct bio *bio);
+int dst_export_send_bio(struct bio *bio);
+int dst_start_export(struct dst_node *n);
+
+int __init dst_export_init(void);
+void dst_export_exit(void);
+
+/* Private structure for export block IO requests */
+struct dst_export_priv
+{
+ struct list_head request_entry;
+ struct dst_state *state;
+ struct bio *bio;
+ struct dst_cmd cmd;
+};
+
+static inline void dst_trans_get(struct dst_trans *t)
+{
+ atomic_inc(&t->refcnt);
+}
+
+struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
+int dst_trans_remove(struct dst_trans *t);
+int dst_trans_remove_nolock(struct dst_trans *t);
+void dst_trans_put(struct dst_trans *t);
+
+/*
+ * Convert bio into network command.
+ */
+static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
+ u32 command, u64 id)
+{
+ cmd->cmd = command;
+ cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
+ cmd->rw = bio->bi_rw;
+ cmd->size = bio->bi_size;
+ cmd->csize = 0;
+ cmd->id = id;
+ cmd->sector = bio->bi_sector;
+};
+
+int dst_trans_send(struct dst_trans *t);
+int dst_trans_crypto(struct dst_trans *t);
+
+int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
+void dst_node_crypto_exit(struct dst_node *n);
+
+static inline int dst_need_crypto(struct dst_node *n)
+{
+ struct dst_crypto_ctl *c = &n->crypto;
+ /*
+ * Logical OR would be appropriate here, but the bitwise one
+ * produces better code, so it is used instead.
+ */
+ return (c->hash_algo[0] | c->cipher_algo[0]);
+}
+
+int dst_node_trans_init(struct dst_node *n, unsigned int size);
+void dst_node_trans_exit(struct dst_node *n);
+
+/*
+ * Pool of threads.
+ * Ready list contains threads currently free to be used,
+ * the active one contains threads with work scheduled for them.
+ * Callers can wait on the given queue until a thread is ready.
+ */
+struct thread_pool
+{
+ int thread_num;
+ struct mutex thread_lock;
+ struct list_head ready_list, active_list;
+
+ wait_queue_head_t wait;
+};
+
+void thread_pool_del_worker(struct thread_pool *p);
+void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
+int thread_pool_add_worker(struct thread_pool *p,
+ char *name,
+ unsigned int id,
+ void *(* init)(void *data),
+ void (* cleanup)(void *data),
+ void *data);
+
+void thread_pool_destroy(struct thread_pool *p);
+struct thread_pool *thread_pool_create(int num, char *name,
+ void *(* init)(void *data),
+ void (* cleanup)(void *data),
+ void *data);
+
+int thread_pool_schedule(struct thread_pool *p,
+ int (* setup)(void *stored_private, void *setup_data),
+ int (* action)(void *stored_private, void *setup_data),
+ void *setup_data, long timeout);
+int thread_pool_schedule_private(struct thread_pool *p,
+ int (* setup)(void *private, void *data),
+ int (* action)(void *private, void *data),
+ void *data, long timeout, void *id);
+
+#endif /* __KERNEL__ */
+#endif /* __DST_H */
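For illustration (not part of the patch): a sending path would typically combine dst_bio_to_cmd() and dst_convert_cmd() as below before pushing the command onto the socket; transaction setup is elided and the function name is hypothetical.

/* Fill the network command for a block request and byte-swap it for the wire. */
static void dst_prepare_io_cmd(struct dst_node *n, struct dst_trans *t)
{
	dst_bio_to_cmd(t->bio, &t->cmd, DST_IO, t->gen);
	t->cmd.csize = n->crypto.crypto_attached_size;	/* room for digest/hmac, if any */
	dst_convert_cmd(&t->cmd);			/* host order -> big endian */
}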
diff --git a/include/linux/dvb/audio.h b/include/linux/dvb/audio.h
index bb0df2aaebf..fec66bd24f2 100644
--- a/include/linux/dvb/audio.h
+++ b/include/linux/dvb/audio.h
@@ -76,7 +76,7 @@ struct audio_karaoke{ /* if Vocal1 or Vocal2 are non-zero, they get mixed */
} audio_karaoke_t; /* into left and right */
-typedef uint16_t audio_attributes_t;
+typedef __u16 audio_attributes_t;
/* bits: descr. */
/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
/* 12 multichannel extension */
diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h
index bd49c3ebf91..1d750c0fd86 100644
--- a/include/linux/dvb/video.h
+++ b/include/linux/dvb/video.h
@@ -132,12 +132,12 @@ struct video_command {
#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3)
struct video_event {
- int32_t type;
+ __s32 type;
#define VIDEO_EVENT_SIZE_CHANGED 1
#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
#define VIDEO_EVENT_DECODER_STOPPED 3
#define VIDEO_EVENT_VSYNC 4
- time_t timestamp;
+ __kernel_time_t timestamp;
union {
video_size_t size;
unsigned int frame_rate; /* in frames per 1000sec */
@@ -157,25 +157,25 @@ struct video_status {
struct video_still_picture {
char __user *iFrame; /* pointer to a single iframe in memory */
- int32_t size;
+ __s32 size;
};
typedef
struct video_highlight {
int active; /* 1=show highlight, 0=hide highlight */
- uint8_t contrast1; /* 7- 4 Pattern pixel contrast */
+ __u8 contrast1; /* 7- 4 Pattern pixel contrast */
/* 3- 0 Background pixel contrast */
- uint8_t contrast2; /* 7- 4 Emphasis pixel-2 contrast */
+ __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
/* 3- 0 Emphasis pixel-1 contrast */
- uint8_t color1; /* 7- 4 Pattern pixel color */
+ __u8 color1; /* 7- 4 Pattern pixel color */
/* 3- 0 Background pixel color */
- uint8_t color2; /* 7- 4 Emphasis pixel-2 color */
+ __u8 color2; /* 7- 4 Emphasis pixel-2 color */
/* 3- 0 Emphasis pixel-1 color */
- uint32_t ypos; /* 23-22 auto action mode */
+ __u32 ypos; /* 23-22 auto action mode */
/* 21-12 start y */
/* 9- 0 end y */
- uint32_t xpos; /* 23-22 button color number */
+ __u32 xpos; /* 23-22 button color number */
/* 21-12 start x */
/* 9- 0 end x */
} video_highlight_t;
@@ -189,17 +189,17 @@ typedef struct video_spu {
typedef struct video_spu_palette { /* SPU Palette information */
int length;
- uint8_t __user *palette;
+ __u8 __user *palette;
} video_spu_palette_t;
typedef struct video_navi_pack {
int length; /* 0 ... 1024 */
- uint8_t data[1024];
+ __u8 data[1024];
} video_navi_pack_t;
-typedef uint16_t video_attributes_t;
+typedef __u16 video_attributes_t;
/* bits: descr. */
/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
/* 13-12 TV system (0=525/60, 1=625/50) */
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f..c8aad713a04 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
+/* DMA API extensions */
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
#endif /* DW_DMAC_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
new file mode 100644
index 00000000000..a0d9422a156
--- /dev/null
+++ b/include/linux/dynamic_debug.h
@@ -0,0 +1,88 @@
+#ifndef _DYNAMIC_DEBUG_H
+#define _DYNAMIC_DEBUG_H
+
+/* dynamic_debug_enabled and dynamic_debug_enabled2 are bitmasks in which
+ * bit n is set to 1 if any modname hashes into bucket n, 0 otherwise. They
+ * use independent hash functions, to reduce the chance of false positives.
+ */
+extern long long dynamic_debug_enabled;
+extern long long dynamic_debug_enabled2;
+
+/*
+ * An instance of this structure is created in a special
+ * ELF section at every dynamic debug callsite. At runtime,
+ * the special section is treated as an array of these.
+ */
+struct _ddebug {
+ /*
+ * These fields are used to drive the user interface
+ * for selecting and displaying debug callsites.
+ */
+ const char *modname;
+ const char *function;
+ const char *filename;
+ const char *format;
+ char primary_hash;
+ char secondary_hash;
+ unsigned int lineno:24;
+ /*
+ * The flags field controls the behaviour at the callsite.
+ * The bits here are changed dynamically when the user
+ * writes commands to <debugfs>/dynamic_debug/ddebug
+ */
+#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
+#define _DPRINTK_FLAGS_DEFAULT 0
+ unsigned int flags:8;
+} __attribute__((aligned(8)));
+
+
+int ddebug_add_module(struct _ddebug *tab, unsigned int n,
+ const char *modname);
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+extern int ddebug_remove_module(char *mod_name);
+
+#define __dynamic_dbg_enabled(dd) ({ \
+ int __ret = 0; \
+ if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \
+ (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \
+ if (unlikely(dd.flags)) \
+ __ret = 1; \
+ __ret; })
+
+#define dynamic_pr_debug(fmt, ...) do { \
+ static struct _ddebug descriptor \
+ __used \
+ __attribute__((section("__verbose"), aligned(8))) = \
+ { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
+ DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
+ if (__dynamic_dbg_enabled(descriptor)) \
+ printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+
+#define dynamic_dev_dbg(dev, fmt, ...) do { \
+ static struct _ddebug descriptor \
+ __used \
+ __attribute__((section("__verbose"), aligned(8))) = \
+ { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
+ DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
+ if (__dynamic_dbg_enabled(descriptor)) \
+ dev_printk(KERN_DEBUG, dev, \
+ KBUILD_MODNAME ": " fmt, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#else
+
+static inline int ddebug_remove_module(char *mod)
+{
+ return 0;
+}
+
+#define dynamic_pr_debug(fmt, ...) do { } while (0)
+#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#endif
+
+#endif
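For context: with CONFIG_DYNAMIC_DEBUG enabled, ordinary pr_debug()/dev_dbg() callsites expand to the dynamic_* macros above, and individual callsites are switched on at runtime through the <debugfs>/dynamic_debug/ddebug control file mentioned in the struct comment. A minimal, hypothetical callsite:

/* Off by default; enabled per module/file/line via the ddebug control file. */
static int mydrv_start(int unit)
{
	pr_debug("starting unit %d\n", unit);
	return 0;
}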
diff --git a/include/linux/dynamic_printk.h b/include/linux/dynamic_printk.h
deleted file mode 100644
index 2d528d00907..00000000000
--- a/include/linux/dynamic_printk.h
+++ /dev/null
@@ -1,93 +0,0 @@
-#ifndef _DYNAMIC_PRINTK_H
-#define _DYNAMIC_PRINTK_H
-
-#define DYNAMIC_DEBUG_HASH_BITS 6
-#define DEBUG_HASH_TABLE_SIZE (1 << DYNAMIC_DEBUG_HASH_BITS)
-
-#define TYPE_BOOLEAN 1
-
-#define DYNAMIC_ENABLED_ALL 0
-#define DYNAMIC_ENABLED_NONE 1
-#define DYNAMIC_ENABLED_SOME 2
-
-extern int dynamic_enabled;
-
-/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-extern long long dynamic_printk_enabled;
-extern long long dynamic_printk_enabled2;
-
-struct mod_debug {
- char *modname;
- char *logical_modname;
- char *flag_names;
- int type;
- int hash;
- int hash2;
-} __attribute__((aligned(8)));
-
-int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
- char *flags, int hash, int hash2);
-
-#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
-extern int unregister_dynamic_debug_module(char *mod_name);
-extern int __dynamic_dbg_enabled_helper(char *modname, int type,
- int value, int hash);
-
-#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ \
- int __ret = 0; \
- if (unlikely((dynamic_printk_enabled & (1LL << DEBUG_HASH)) && \
- (dynamic_printk_enabled2 & (1LL << DEBUG_HASH2)))) \
- __ret = __dynamic_dbg_enabled_helper(module, type, \
- value, hash);\
- __ret; })
-
-#define dynamic_pr_debug(fmt, ...) do { \
- static char mod_name[] \
- __attribute__((section("__verbose_strings"))) \
- = KBUILD_MODNAME; \
- static struct mod_debug descriptor \
- __used \
- __attribute__((section("__verbose"), aligned(8))) = \
- { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
- if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
- 0, 0, DEBUG_HASH)) \
- printk(KERN_DEBUG KBUILD_MODNAME ":" fmt, \
- ##__VA_ARGS__); \
- } while (0)
-
-#define dynamic_dev_dbg(dev, format, ...) do { \
- static char mod_name[] \
- __attribute__((section("__verbose_strings"))) \
- = KBUILD_MODNAME; \
- static struct mod_debug descriptor \
- __used \
- __attribute__((section("__verbose"), aligned(8))) = \
- { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
- if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
- 0, 0, DEBUG_HASH)) \
- dev_printk(KERN_DEBUG, dev, \
- KBUILD_MODNAME ": " format, \
- ##__VA_ARGS__); \
- } while (0)
-
-#else
-
-static inline int unregister_dynamic_debug_module(const char *mod_name)
-{
- return 0;
-}
-static inline int __dynamic_dbg_enabled_helper(char *modname, int type,
- int value, int hash)
-{
- return 0;
-}
-
-#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ 0; })
-#define dynamic_pr_debug(fmt, ...) do { } while (0)
-#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
-#endif
-
-#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index bb66feb164b..ce4581fbc08 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -101,7 +101,7 @@ typedef struct {
u64 attribute;
} efi_memory_desc_t;
-typedef int (*efi_freemem_callback_t) (unsigned long start, unsigned long end, void *arg);
+typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
/*
* Types and defines for Time Services
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index e61c0be2a45..6925249a5ac 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -78,12 +78,12 @@ static inline void eisa_driver_unregister (struct eisa_driver *edrv) { }
/* Mimics pci.h... */
static inline void *eisa_get_drvdata (struct eisa_device *edev)
{
- return edev->dev.driver_data;
+ return dev_get_drvdata(&edev->dev);
}
static inline void eisa_set_drvdata (struct eisa_device *edev, void *data)
{
- edev->dev.driver_data = data;
+ dev_set_drvdata(&edev->dev, data);
}
/* The EISA root device. There's rumours about machines with multiple
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7a204256b15..1cb3372e65d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
@@ -116,6 +114,7 @@ extern void elv_abort_queue(struct request_queue *);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
extern void elv_put_request(struct request_queue *, struct request *);
+extern void elv_drain_elevator(struct request_queue *);
/*
* io scheduler registration
@@ -170,7 +169,7 @@ enum {
ELV_MQUEUE_MUST,
};
-#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/*
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 5ca54d77079..7605c5e9589 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+ ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+ elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#ifdef ELF_CORE_COPY_TASK_REGS
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
index ceb1454b697..ec12cc74366 100644
--- a/include/linux/errqueue.h
+++ b/include/linux/errqueue.h
@@ -18,6 +18,7 @@ struct sock_extended_err
#define SO_EE_ORIGIN_LOCAL 1
#define SO_EE_ORIGIN_ICMP 2
#define SO_EE_ORIGIN_ICMP6 3
+#define SO_EE_ORIGIN_TIMESTAMPING 4
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 1cb0f0b9092..3d7a6687d24 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -182,6 +182,54 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
return compare_ether_addr(addr1, addr2);
#endif
}
+
+/**
+ * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
+ * @dev: Pointer to a device structure
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Compare the passed address with all addresses of the device. Return true if
+ * the address is one of the device addresses.
+ *
+ * Note that this function calls compare_ether_addr_64bits() so take care of
+ * the right padding.
+ */
+static inline bool is_etherdev_addr(const struct net_device *dev,
+ const u8 addr[6 + 2])
+{
+ struct netdev_hw_addr *ha;
+ int res = 1;
+
+ rcu_read_lock();
+ for_each_dev_addr(dev, ha) {
+ res = compare_ether_addr_64bits(addr, ha->addr);
+ if (!res)
+ break;
+ }
+ rcu_read_unlock();
+ return !res;
+}
#endif /* __KERNEL__ */
+/**
+ * compare_ether_header - Compare two Ethernet headers
+ * @a: Pointer to Ethernet header
+ * @b: Pointer to Ethernet header
+ *
+ * Compare two Ethernet headers; returns 0 if equal.
+ * This assumes that the network header (i.e., IP header) is 4-byte
+ * aligned OR the platform can handle unaligned access. This is the
+ * case for all packets coming into netif_receive_skb or similar
+ * entry points.
+ */
+
+static inline int compare_ether_header(const void *a, const void *b)
+{
+ u32 *a32 = (u32 *)((u8 *)a + 2);
+ u32 *b32 = (u32 *)((u8 *)b + 2);
+
+ return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
+ (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
+}
+
#endif /* _LINUX_ETHERDEVICE_H */
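Typical fast-path use of compare_ether_header() (sketch, function name hypothetical): compare the just-received header against a cached one before doing any per-field work.

/* True when the whole 14-byte Ethernet header matches the cached copy. */
static bool eth_hdr_matches(const struct sk_buff *skb, const void *cached_hdr)
{
	return compare_ether_header(skb_mac_header(skb), cached_hdr) == 0;
}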
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 27c67a54223..9b660bd2e2b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -7,6 +7,7 @@
* Portions Copyright 2002 Intel (eli.kupermann@intel.com,
* christopher.leech@intel.com,
* scott.feldman@intel.com)
+ * Portions Copyright (C) Sun Microsystems 2008
*/
#ifndef _LINUX_ETHTOOL_H
@@ -25,11 +26,14 @@ struct ethtool_cmd {
__u8 phy_address;
__u8 transceiver; /* Which transceiver to use */
__u8 autoneg; /* Enable or disable autonegotiation */
+ __u8 mdio_support;
__u32 maxtxpkt; /* Tx pkts before generating tx int */
__u32 maxrxpkt; /* Rx pkts before generating rx int */
__u16 speed_hi;
- __u16 reserved2;
- __u32 reserved[3];
+ __u8 eth_tp_mdix;
+ __u8 reserved2;
+ __u32 lp_advertising; /* Features the link partner advertises */
+ __u32 reserved[2];
};
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
@@ -287,10 +291,75 @@ enum ethtool_flags {
ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
};
-struct ethtool_rxnfc {
- __u32 cmd;
+/* The following structures are for supporting RX network flow
+ * classification configuration. Note, all multibyte fields, e.g.,
+ * ip4src, ip4dst, psrc, pdst, spi, etc. are expected to be in network
+ * byte order.
+ */
+struct ethtool_tcpip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be16 psrc;
+ __be16 pdst;
+ __u8 tos;
+};
+
+struct ethtool_ah_espip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be32 spi;
+ __u8 tos;
+};
+
+struct ethtool_rawip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __u8 hdata[64];
+};
+
+struct ethtool_ether_spec {
+ __be16 ether_type;
+ __u8 frame_size;
+ __u8 eframe[16];
+};
+
+#define ETH_RX_NFC_IP4 1
+#define ETH_RX_NFC_IP6 2
+
+struct ethtool_usrip4_spec {
+ __be32 ip4src;
+ __be32 ip4dst;
+ __be32 l4_4_bytes;
+ __u8 tos;
+ __u8 ip_ver;
+ __u8 proto;
+};
+
+struct ethtool_rx_flow_spec {
__u32 flow_type;
- __u64 data;
+ union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_tcpip4_spec udp_ip4_spec;
+ struct ethtool_tcpip4_spec sctp_ip4_spec;
+ struct ethtool_ah_espip4_spec ah_ip4_spec;
+ struct ethtool_ah_espip4_spec esp_ip4_spec;
+ struct ethtool_rawip4_spec raw_ip4_spec;
+ struct ethtool_ether_spec ether_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ __u8 hdata[64];
+ } h_u, m_u; /* entry, mask */
+ __u64 ring_cookie;
+ __u32 location;
+};
+
+struct ethtool_rxnfc {
+ __u32 cmd;
+ __u32 flow_type;
+ /* The rx flow hash value or the rule DB size */
+ __u64 data;
+ struct ethtool_rx_flow_spec fs;
+ __u32 rule_cnt;
+ __u32 rule_locs[0];
};
#ifdef __KERNEL__
@@ -417,8 +486,8 @@ struct ethtool_ops {
/* the following hooks are obsolete */
int (*self_test_count)(struct net_device *);/* use get_sset_count */
int (*get_stats_count)(struct net_device *);/* use get_sset_count */
- int (*get_rxhash)(struct net_device *, struct ethtool_rxnfc *);
- int (*set_rxhash)(struct net_device *, struct ethtool_rxnfc *);
+ int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, void *);
+ int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
};
#endif /* __KERNEL__ */
@@ -469,6 +538,12 @@ struct ethtool_ops {
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
+#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */
+#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */
+#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */
+#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */
+#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
+#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -491,6 +566,11 @@ struct ethtool_ops {
#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Asym_Pause (1 << 14)
#define SUPPORTED_2500baseX_Full (1 << 15)
+#define SUPPORTED_Backplane (1 << 16)
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#define SUPPORTED_10000baseKX4_Full (1 << 18)
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#define SUPPORTED_10000baseR_FEC (1 << 20)
/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
@@ -509,6 +589,11 @@ struct ethtool_ops {
#define ADVERTISED_Pause (1 << 13)
#define ADVERTISED_Asym_Pause (1 << 14)
#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_Backplane (1 << 16)
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#define ADVERTISED_10000baseKX4_Full (1 << 18)
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#define ADVERTISED_10000baseR_FEC (1 << 20)
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
@@ -533,6 +618,7 @@ struct ethtool_ops {
#define PORT_MII 0x02
#define PORT_FIBRE 0x03
#define PORT_BNC 0x04
+#define PORT_OTHER 0xff
/* Which transceiver to use. */
#define XCVR_INTERNAL 0x00
@@ -547,6 +633,11 @@ struct ethtool_ops {
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
+/* Mode MDI or MDI-X */
+#define ETH_TP_MDI_INVALID 0x00
+#define ETH_TP_MDI 0x01
+#define ETH_TP_MDI_X 0x02
+
/* Wake-On-Lan options. */
#define WAKE_PHY (1 << 0)
#define WAKE_UCAST (1 << 1)
@@ -565,9 +656,13 @@ struct ethtool_ops {
#define UDP_V6_FLOW 0x06
#define SCTP_V6_FLOW 0x07
#define AH_ESP_V6_FLOW 0x08
+#define AH_V4_FLOW 0x09
+#define ESP_V4_FLOW 0x0a
+#define AH_V6_FLOW 0x0b
+#define ESP_V6_FLOW 0x0c
+#define IP_USER_FLOW 0x0d
/* L3-L4 network traffic flow hash options */
-#define RXH_DEV_PORT (1 << 0)
#define RXH_L2DA (1 << 1)
#define RXH_VLAN (1 << 2)
#define RXH_L3_PROTO (1 << 3)
@@ -577,5 +672,6 @@ struct ethtool_ops {
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD (1 << 31)
+#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
#endif /* _LINUX_ETHTOOL_H */
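From userspace, the new classification commands travel over the existing SIOCETHTOOL ioctl. A hedged sketch inserting a TCP/IPv4 rule that steers destination-port-80 traffic to RX ring 1; the interface name is hypothetical, sock is assumed to be any AF_INET datagram socket, and the m_u mask semantics are driver-specific, so the mask is left zeroed here:

/* Insert one RX classification rule with ETHTOOL_SRXCLSRLINS. */
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int insert_http_rule(int sock)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);	/* destination port 80 */
	nfc.fs.ring_cookie = 1;				/* deliver to RX ring 1 */
	nfc.fs.location = 0;				/* rule slot 0 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical interface */
	ifr.ifr_data = (void *)&nfc;
	return ioctl(sock, SIOCETHTOOL, &ifr);
}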
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index a667637b54e..f45a8ae5f82 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -13,10 +13,20 @@
/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
-/* Flags for eventfd2. */
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define EFD_SEMAPHORE (1 << 0)
#define EFD_CLOEXEC O_CLOEXEC
#define EFD_NONBLOCK O_NONBLOCK
+#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+
struct file *eventfd_fget(int fd);
int eventfd_signal(struct file *file, int n);
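Userspace view of the new flag (glibc wrapper assumed available): with EFD_SEMAPHORE, each read() returns 1 and decrements the counter by one instead of returning and resetting the whole value.

/* Semaphore-style eventfd: three reads succeed before a fourth would block. */
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

static int eventfd_sem_demo(void)
{
	uint64_t val;
	int fd = eventfd(3, EFD_SEMAPHORE);	/* counter starts at 3 */

	if (fd < 0)
		return -1;
	read(fd, &val, sizeof(val));		/* val == 1, counter is now 2 */
	close(fd);
	return 0;
}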
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index f1e1d3c4712..f6856a5a1d4 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,7 +61,6 @@ struct file;
static inline void eventpoll_init_file(struct file *file)
{
INIT_LIST_HEAD(&file->f_ep_links);
- spin_lock_init(&file->f_ep_lock);
}
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index dd495b8c309..634a5e5aba3 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
+#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
/* Used to pass group descriptor data when online resize is done */
struct ext3_new_group_input {
@@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
/* ioctl.c */
-extern int ext3_ioctl (struct inode *, struct file *, unsigned int,
- unsigned long);
-extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long);
+extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
+extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
/* namei.c */
extern int ext3_orphan_add(handle_t *, struct inode *);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 31527e17076..dd68358996b 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -123,6 +123,7 @@ struct dentry;
#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */
#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
+#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
@@ -172,8 +173,12 @@ struct fb_fix_screeninfo {
/* Interpretation of offset for color fields: All offsets are from the right,
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
* can use the offset as right argument to <<). A pixel afterwards is a bit
- * stream and is written to video memory as that unmodified. This implies
- * big-endian byte order if bits_per_pixel is greater than 8.
+ * stream and is written to video memory as that unmodified.
+ *
+ * For pseudocolor: offset and length should be the same for all color
+ * components. Offset specifies the position of the least significant bit
+ * of the palette index in a pixel value. Length indicates the number
+ * of available palette entries (i.e. # of entries = 1 << length).
*/
struct fb_bitfield {
__u32 offset; /* beginning of bitfield */
@@ -672,6 +677,9 @@ struct fb_ops {
/* get capability given var */
void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
struct fb_var_screeninfo *var);
+
+ /* teardown any resources to do with this framebuffer */
+ void (*fb_destroy)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING
@@ -781,6 +789,8 @@ struct fb_tile_ops {
#define FBINFO_MISC_USEREVENT 0x10000 /* event request
from userspace */
#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
+#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware
+ inited framebuffer */
/* A driver may set this flag to indicate that it does want a set_par to be
* called every time when fbcon_switch is executed. The advantage is that with
@@ -849,7 +859,12 @@ struct fb_info {
u32 state; /* Hardware state i.e suspend */
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
- void *par;
+ void *par;
+ /* we need the PCI or similar aperture base/size, not
+ smem_start/size, as smem_start may just be an object
+ allocated inside the aperture, so it may not actually overlap */
+ resource_size_t aperture_base;
+ resource_size_t aperture_size;
};
#ifdef MODULE
@@ -888,7 +903,7 @@ struct fb_info {
#define fb_writeq sbus_writeq
#define fb_memset sbus_memset_io
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
@@ -960,15 +975,7 @@ extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern struct class *fb_class;
-static inline int lock_fb_info(struct fb_info *info)
-{
- mutex_lock(&info->lock);
- if (!info->fbops) {
- mutex_unlock(&info->lock);
- return 0;
- }
- return 1;
-}
+extern int lock_fb_info(struct fb_info *info);
static inline void unlock_fb_info(struct fb_info *info)
{
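To make the pseudocolor wording above concrete, here is a minimal sketch (not taken from any driver; the helper name is invented) of an 8 bpp pseudocolor mode, where all three components describe the same bitfield and the palette size follows from 1 << length:

#include <linux/fb.h>

/* Illustrative only: 8 bpp pseudocolor, palette index in bits 0..7,
 * so the visual exposes 1 << 8 = 256 palette entries. */
static void example_fill_pseudocolor_var(struct fb_var_screeninfo *var)
{
	var->bits_per_pixel = 8;

	var->red.offset    = 0;		/* LSB of the palette index */
	var->red.length    = 8;		/* 256 palette entries */
	var->red.msb_right = 0;

	var->green = var->red;		/* identical for all components */
	var->blue  = var->red;
}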
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 09d6c5bbddd..a2ec74bc481 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -5,12 +5,14 @@
#ifndef __LINUX_FDTABLE_H
#define __LINUX_FDTABLE_H
-#include <asm/atomic.h>
#include <linux/posix_types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/atomic.h>
/*
* The default fd array needs to be at least BITS_PER_LONG,
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index 671decbd2ae..934e22d6580 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -11,6 +11,8 @@
#ifndef _LINUX_FIEMAP_H
#define _LINUX_FIEMAP_H
+#include <linux/types.h>
+
struct fiemap_extent {
__u64 fe_logical; /* logical offset in bytes for the start of
* the extent from the beginning of the file */
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
index cca686b3912..875451f1373 100644
--- a/include/linux/firmware-map.h
+++ b/include/linux/firmware-map.h
@@ -24,21 +24,17 @@
*/
#ifdef CONFIG_FIRMWARE_MEMMAP
-int firmware_map_add(resource_size_t start, resource_size_t end,
- const char *type);
-int firmware_map_add_early(resource_size_t start, resource_size_t end,
- const char *type);
+int firmware_map_add(u64 start, u64 end, const char *type);
+int firmware_map_add_early(u64 start, u64 end, const char *type);
#else /* CONFIG_FIRMWARE_MEMMAP */
-static inline int firmware_map_add(resource_size_t start, resource_size_t end,
- const char *type)
+static inline int firmware_map_add(u64 start, u64 end, const char *type)
{
return 0;
}
-static inline int firmware_map_add_early(resource_size_t start,
- resource_size_t end, const char *type)
+static inline int firmware_map_add_early(u64 start, u64 end, const char *type)
{
return 0;
}
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index c8ecf5b2a20..d3154462843 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#define FIRMWARE_NAME_MAX 30
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 92734c0012e..1ff5e4e0195 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -87,6 +87,60 @@ struct inodes_stat_t {
*/
#define FMODE_NOCMTIME ((__force fmode_t)2048)
+/*
+ * The below are the various read and write types that we support. Some of
+ * them include behavioral modifiers that send information down to the
+ * block layer and IO scheduler. Terminology:
+ *
+ * The block layer uses device plugging to defer IO a little bit, in
+ * the hope that we will see more IO very shortly. This increases
+ * coalescing of adjacent IO and thus reduces the number of IOs we
+ * have to send to the device. It also allows for better queuing,
+ * if the IO isn't mergeable. If the caller is going to be waiting
+ * for the IO, then he must ensure that the device is unplugged so
+ * that the IO is dispatched to the driver.
+ *
+ * All IO is handled async in Linux. This is fine for background
+ * writes, but for reads or writes that someone waits for completion
+ * on, we want to notify the block layer and IO scheduler so that they
+ * know about it. That allows them to make better scheduling
+ * decisions. So when the below references 'sync' and 'async', it
+ * is referencing this priority hint.
+ *
+ * With that in mind, the available types are:
+ *
+ * READ A normal read operation. Device will be plugged.
+ * READ_SYNC A synchronous read. Device is not plugged, caller can
+ * immediately wait on this read without caring about
+ * unplugging.
+ * READA Used for read-ahead operations. Lower priority, and the
+ * block layer could (in theory) choose to ignore this
+ * request if it runs into resource problems.
+ * WRITE A normal async write. Device will be plugged.
+ * SWRITE Like WRITE, but a special case for ll_rw_block() that
+ * tells it to lock the buffer first. Normally a buffer
+ * must be locked before doing IO.
+ * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
+ * the hint that someone will be waiting on this IO
+ * shortly. The device must still be unplugged explicitly,
+ * WRITE_SYNC_PLUG does not do this as we could be
+ * submitting more writes before we actually wait on any
+ * of them.
+ * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
+ * immediately after submission. The write equivalent
+ * of READ_SYNC.
+ * WRITE_ODIRECT Special case write for O_DIRECT only.
+ * SWRITE_SYNC
+ * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
+ * See SWRITE.
+ * WRITE_BARRIER Like WRITE, but tells the block layer that all
+ * previously submitted writes must be safely on storage
+ * before this one is started. Also guarantees that when
+ * this write is complete, it itself is also safely on
+ * storage. Prevents reordering of writes on both sides
+ * of this IO.
+ *
+ */
#define RW_MASK 1
#define RWA_MASK 2
#define READ 0
@@ -95,9 +149,18 @@ struct inodes_stat_t {
#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
#define READ_META (READ | (1 << BIO_RW_META))
-#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
+#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
+#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define SWRITE_SYNC_PLUG \
+ (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
+#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
+
+/*
+ * These aren't really reads or writes; they pass down information about
+ * parts of the device that are now unused by the file system.
+ */
#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
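As a quick check on how the composite flags above decompose, here is a small sketch (not kernel code; the helper is invented) that pulls the individual BIO_RW_* hints back out of a flag word, which makes the difference between WRITE_SYNC_PLUG and WRITE_SYNC easy to see:

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* Illustrative helper: report which behavioural modifiers one of the
 * flag words defined above carries. */
static void example_describe_rw(unsigned long rw)
{
	pr_info("%s%s%s%s\n",
		(rw & WRITE) ? "write" : "read",
		(rw & (1 << BIO_RW_SYNCIO)) ? " sync" : "",
		(rw & (1 << BIO_RW_NOIDLE)) ? " noidle" : "",
		(rw & (1 << BIO_RW_UNPLUG)) ? " unplug" : "");
}

/* example_describe_rw(WRITE_SYNC_PLUG) prints "write sync noidle"
 * example_describe_rw(WRITE_SYNC)      prints "write sync noidle unplug"
 * example_describe_rw(READ_SYNC)       prints "read sync unplug" */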
@@ -141,6 +204,7 @@ struct inodes_stat_t {
#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
#define MS_ACTIVE (1<<30)
#define MS_NOUSER (1<<31)
@@ -665,8 +729,8 @@ struct inode {
struct timespec i_atime;
struct timespec i_mtime;
struct timespec i_ctime;
- unsigned int i_blkbits;
blkcnt_t i_blocks;
+ unsigned int i_blkbits;
unsigned short i_bytes;
umode_t i_mode;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
@@ -687,13 +751,12 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
};
- int i_cindex;
__u32 i_generation;
-#ifdef CONFIG_DNOTIFY
- unsigned long i_dnotify_mask; /* Directory notify events */
- struct dnotify_struct *i_dnotify; /* for directory notifications */
+#ifdef CONFIG_FSNOTIFY
+ __u32 i_fsnotify_mask; /* all events this inode cares about */
+ struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
#endif
#ifdef CONFIG_INOTIFY
@@ -733,9 +796,6 @@ enum inode_i_mutex_lock_class
I_MUTEX_QUOTA
};
-extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
-extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
-
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -819,7 +879,7 @@ struct file_ra_state {
there are only # of pages ahead */
unsigned int ra_pages; /* Maximum readahead window */
- int mmap_miss; /* Cache miss stat for mmap accesses */
+ unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
loff_t prev_pos; /* Cache last read() position */
};
@@ -848,6 +908,7 @@ struct file {
#define f_dentry f_path.dentry
#define f_vfsmnt f_path.mnt
const struct file_operations *f_op;
+ spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
@@ -866,7 +927,6 @@ struct file {
#ifdef CONFIG_EPOLL
/* Used by fs/eventpoll.c to link all the hooks to this file */
struct list_head f_ep_links;
- spinlock_t f_ep_lock;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
#ifdef CONFIG_DEBUG_WRITECOUNT
@@ -1047,6 +1107,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
+extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
@@ -1063,34 +1124,147 @@ extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
#else /* !CONFIG_FILE_LOCKING */
-#define fcntl_getlk(a, b) ({ -EINVAL; })
-#define fcntl_setlk(a, b, c, d) ({ -EACCES; })
+static inline int fcntl_getlk(struct file *file, struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
#if BITS_PER_LONG == 32
-#define fcntl_getlk64(a, b) ({ -EINVAL; })
-#define fcntl_setlk64(a, b, c, d) ({ -EACCES; })
+static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 __user *user)
+{
+ return -EACCES;
+}
#endif
-#define fcntl_setlease(a, b, c) ({ 0; })
-#define fcntl_getlease(a) ({ 0; })
-#define locks_init_lock(a) ({ })
-#define __locks_copy_lock(a, b) ({ })
-#define locks_copy_lock(a, b) ({ })
-#define locks_remove_posix(a, b) ({ })
-#define locks_remove_flock(a) ({ })
-#define posix_test_lock(a, b) ({ 0; })
-#define posix_lock_file(a, b, c) ({ -ENOLCK; })
-#define posix_lock_file_wait(a, b) ({ -ENOLCK; })
-#define posix_unblock_lock(a, b) (-ENOENT)
-#define vfs_test_lock(a, b) ({ 0; })
-#define vfs_lock_file(a, b, c, d) (-ENOLCK)
-#define vfs_cancel_lock(a, b) ({ 0; })
-#define flock_lock_file_wait(a, b) ({ -ENOLCK; })
-#define __break_lease(a, b) ({ 0; })
-#define lease_get_mtime(a, b) ({ })
-#define generic_setlease(a, b, c) ({ -EINVAL; })
-#define vfs_setlease(a, b, c) ({ -EINVAL; })
-#define lease_modify(a, b) ({ -EINVAL; })
-#define lock_may_read(a, b, c) ({ 1; })
-#define lock_may_write(a, b, c) ({ 1; })
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+ return 0;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return 0;
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_flock(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int posix_unblock_lock(struct file *filp,
+ struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int flock_lock_file_wait(struct file *filp,
+ struct file_lock *request)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, long arg,
+ struct file_lock **flp)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, long arg,
+ struct file_lock **lease)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lock **before, int arg)
+{
+ return -EINVAL;
+}
+
+static inline int lock_may_read(struct inode *inode, loff_t start,
+ unsigned long len)
+{
+ return 1;
+}
+
+static inline int lock_may_write(struct inode *inode, loff_t start,
+ unsigned long len)
+{
+ return 1;
+}
+
#endif /* !CONFIG_FILE_LOCKING */
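The hunk above swaps the old ({ ... }) statement-expression macros for typed static inline stubs: the inline form type-checks its arguments, keeps them formally used (so callers do not pick up unused-variable warnings when the option is off), and behaves the same in every expression context. The same pattern in miniature, with an entirely made-up subsystem and CONFIG symbol:

#include <linux/errno.h>

struct foo_dev;		/* hypothetical type, for illustration only */

#ifdef CONFIG_FOO
extern int foo_do_thing(struct foo_dev *dev, unsigned int flags);
#else
/* typed no-op stub: callers compile and type-check either way */
static inline int foo_do_thing(struct foo_dev *dev, unsigned int flags)
{
	return -ENOSYS;
}
#endif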
@@ -1147,7 +1321,7 @@ struct super_block {
struct rw_semaphore s_umount;
struct mutex s_lock;
int s_count;
- int s_need_sync_fs;
+ int s_need_sync;
atomic_t s_active;
#ifdef CONFIG_SECURITY
void *s_security;
@@ -1198,11 +1372,6 @@ struct super_block {
* generic_show_options()
*/
char *s_options;
-
- /*
- * storage for asynchronous operations
- */
- struct list_head s_async_list;
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1581,6 +1750,9 @@ struct file_system_type {
struct lock_class_key i_alloc_sem_key;
};
+extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
extern int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int),
@@ -1598,6 +1770,7 @@ void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
+void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
@@ -1606,7 +1779,7 @@ struct super_block *sget(struct file_system_type *type,
extern int get_sb_pseudo(struct file_system_type *, char *,
const struct super_operations *ops, unsigned long,
struct vfsmount *mnt);
-extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
+extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
@@ -1622,11 +1795,13 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
+extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int vfs_statfs(struct dentry *, struct kstatfs *);
+extern int current_umask(void);
+
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -1687,13 +1862,44 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
return 0;
}
#else /* !CONFIG_FILE_LOCKING */
-#define locks_mandatory_locked(a) ({ 0; })
-#define locks_mandatory_area(a, b, c, d, e) ({ 0; })
-#define __mandatory_lock(a) ({ 0; })
-#define mandatory_lock(a) ({ 0; })
-#define locks_verify_locked(a) ({ 0; })
-#define locks_verify_truncate(a, b, c) ({ 0; })
-#define break_lease(a, b) ({ 0; })
+static inline int locks_mandatory_locked(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_mandatory_area(int rw, struct inode *inode,
+ struct file *filp, loff_t offset,
+ size_t count)
+{
+ return 0;
+}
+
+static inline int __mandatory_lock(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int mandatory_lock(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_verify_locked(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
+ size_t size)
+{
+ return 0;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
#endif /* CONFIG_FILE_LOCKING */
/* fs/open.c */
@@ -1714,8 +1920,9 @@ extern void __init vfs_caches_init(unsigned long);
extern struct kmem_cache *names_cachep;
-#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp))
+#define __getname() __getname_gfp(GFP_KERNEL)
+#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
#ifndef CONFIG_AUDITSYSCALL
#define putname(name) __putname(name)
#else
@@ -1730,9 +1937,28 @@ extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern void invalidate_bdev(struct block_device *);
+extern int sync_blockdev(struct block_device *bdev);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern void emergency_thaw_all(void);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
+static inline int sync_blockdev(struct block_device *bdev) { return 0; }
+static inline void invalidate_bdev(struct block_device *bdev) {}
+
+static inline struct super_block *freeze_bdev(struct block_device *sb)
+{
+ return NULL;
+}
+
+static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+ return 0;
+}
#endif
+extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
extern const struct file_operations bad_sock_fops;
@@ -1812,9 +2038,6 @@ extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int);
#endif
extern int invalidate_inodes(struct super_block *);
-unsigned long __invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end,
- bool be_atomic);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@ -1851,12 +2074,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync);
extern void sync_supers(void);
-extern void sync_filesystems(int wait);
-extern void __fsync_super(struct super_block *sb);
extern void emergency_sync(void);
extern void emergency_remount(void);
-extern int do_remount_sb(struct super_block *sb, int flags,
- void *data, int force);
#ifdef CONFIG_BLOCK
extern sector_t bmap(struct inode *, sector_t);
#endif
@@ -1881,14 +2100,13 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
}
-extern int do_pipe(int *);
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);
extern void free_write_pipe(struct file *);
extern struct file *do_filp_open(int dfd, const char *pathname,
- int open_flag, int mode);
+ int open_flag, int mode, int acc_mode);
extern int may_open(struct path *, int, int);
extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
@@ -1975,10 +2193,10 @@ extern int generic_segment_checks(const struct iovec *iov,
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
- struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, loff_t *, size_t len, unsigned int flags);
extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
@@ -2072,9 +2290,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *);
extern int vfs_stat(char __user *, struct kstat *);
extern int vfs_lstat(char __user *, struct kstat *);
-extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
-extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
+extern int vfs_fstatat(int, char __user *, struct kstat *, int);
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
@@ -2127,6 +2344,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
+extern int simple_fsync(struct file *, struct dentry *, int);
+
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -2141,6 +2360,7 @@ extern void file_update_time(struct file *file);
extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
extern void save_mount_options(struct super_block *sb, char *options);
+extern void replace_mount_options(struct super_block *sb, char *options);
static inline ino_t parent_ino(struct dentry *dentry)
{
@@ -2171,19 +2391,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf,
size_t size, loff_t *pos);
int simple_transaction_release(struct inode *inode, struct file *file);
-static inline void simple_transaction_set(struct file *file, size_t n)
-{
- struct simple_transaction_argresp *ar = file->private_data;
-
- BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
-
- /*
- * The barrier ensures that ar->size will really remain zero until
- * ar->data is ready for reading.
- */
- smp_mb();
- ar->size = n;
-}
+void simple_transaction_set(struct file *file, size_t n);
/*
* simple attribute files
@@ -2230,32 +2438,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
-
-#ifdef CONFIG_SECURITY
-static inline char *alloc_secdata(void)
-{
- return (char *)get_zeroed_page(GFP_KERNEL);
-}
-
-static inline void free_secdata(void *secdata)
-{
- free_page((unsigned long)secdata);
-}
-#else
-static inline char *alloc_secdata(void)
-{
- return (char *)1;
-}
-
-static inline void free_secdata(void *secdata)
-{ }
-#endif /* CONFIG_SECURITY */
-
struct ctl_table;
int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
-int get_filesystem_list(char * buf);
+int __init get_filesystem_list(char *buf);
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index 8300cab30f9..51b793466ff 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -17,6 +17,7 @@
#define FS_ENET_PD_H
#include <linux/string.h>
+#include <linux/of_mdio.h>
#include <asm/types.h>
#define FS_ENET_NAME "fs_enet"
@@ -130,10 +131,7 @@ struct fs_platform_info {
u32 device_flags;
- int phy_addr; /* the phy address (-1 no phy) */
- char bus_id[16];
- int phy_irq; /* the phy irq (if it exists) */
-
+ struct device_node *phy_node;
const struct fs_mii_bus_info *bus_info;
int rx_ring, tx_ring; /* number of buffers on rx */
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index a97c053d3a9..78a05bfcd8e 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -4,9 +4,10 @@
#include <linux/path.h>
struct fs_struct {
- atomic_t count;
+ int users;
rwlock_t lock;
int umask;
+ int in_exec;
struct path root, pwd;
};
@@ -16,6 +17,8 @@ extern void exit_fs(struct task_struct *);
extern void set_fs_root(struct fs_struct *, struct path *);
extern void set_fs_pwd(struct fs_struct *, struct path *);
extern struct fs_struct *copy_fs_struct(struct fs_struct *);
-extern void put_fs_struct(struct fs_struct *);
+extern void free_fs_struct(struct fs_struct *);
+extern void daemonize_fs_struct(void);
+extern int unshare_fs_struct(void);
#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 00000000000..84d3532dd3e
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,505 @@
+/* General filesystem caching backing cache interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ * Documentation/filesystems/caching/backend-api.txt
+ *
+ * for a description of the cache backend interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_CACHE_H
+#define _LINUX_FSCACHE_CACHE_H
+
+#include <linux/fscache.h>
+#include <linux/sched.h>
+#include <linux/slow-work.h>
+
+#define NR_MAXCACHES BITS_PER_LONG
+
+struct fscache_cache;
+struct fscache_cache_ops;
+struct fscache_object;
+struct fscache_operation;
+
+/*
+ * cache tag definition
+ */
+struct fscache_cache_tag {
+ struct list_head link;
+ struct fscache_cache *cache; /* cache referred to by this tag */
+ unsigned long flags;
+#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
+ atomic_t usage;
+ char name[0]; /* tag name */
+};
+
+/*
+ * cache definition
+ */
+struct fscache_cache {
+ const struct fscache_cache_ops *ops;
+ struct fscache_cache_tag *tag; /* tag representing this cache */
+ struct kobject *kobj; /* system representation of this cache */
+ struct list_head link; /* link in list of caches */
+ size_t max_index_size; /* maximum size of index data */
+ char identifier[36]; /* cache label */
+
+ /* node management */
+ struct work_struct op_gc; /* operation garbage collector */
+ struct list_head object_list; /* list of data/index objects */
+ struct list_head op_gc_list; /* list of ops to be deleted */
+ spinlock_t object_list_lock;
+ spinlock_t op_gc_list_lock;
+ atomic_t object_count; /* no. of live objects in this cache */
+ struct fscache_object *fsdef; /* object for the fsdef index */
+ unsigned long flags;
+#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
+#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
+};
+
+extern wait_queue_head_t fscache_cache_cleared_wq;
+
+/*
+ * operation to be applied to a cache object
+ * - retrieval initiation operations are done in the context of the process
+ * that issued them, and not in an async thread pool
+ */
+typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+
+struct fscache_operation {
+ union {
+ struct work_struct fast_work; /* record for fast ops */
+ struct slow_work slow_work; /* record for (very) slow ops */
+ };
+ struct list_head pend_link; /* link in object->pending_ops */
+ struct fscache_object *object; /* object to be operated upon */
+
+ unsigned long flags;
+#define FSCACHE_OP_TYPE 0x000f /* operation type */
+#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
+#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by the issuing thread, not pool */
+#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
+#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
+#define FSCACHE_OP_DEAD 6 /* op is now dead */
+
+ atomic_t usage;
+ unsigned debug_id; /* debugging ID */
+
+ /* operation processor callback
+ * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
+ * the op in a non-pool thread */
+ fscache_operation_processor_t processor;
+
+ /* operation releaser */
+ fscache_operation_release_t release;
+};
+
+extern atomic_t fscache_op_debug_id;
+extern const struct slow_work_ops fscache_op_slow_work_ops;
+
+extern void fscache_enqueue_operation(struct fscache_operation *);
+extern void fscache_put_operation(struct fscache_operation *);
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation. The caller must still set flags,
+ * object, either fast_work or slow_work if necessary, and processor if needed.
+ */
+static inline void fscache_operation_init(struct fscache_operation *op,
+ fscache_operation_release_t release)
+{
+ atomic_set(&op->usage, 1);
+ op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+ op->release = release;
+ INIT_LIST_HEAD(&op->pend_link);
+}
+
+/**
+ * fscache_operation_init_slow - Do additional initialisation of a slow op
+ * @op: The operation to initialise
+ * @processor: The processor function to assign
+ *
+ * Do additional initialisation of an operation as required for slow work.
+ */
+static inline
+void fscache_operation_init_slow(struct fscache_operation *op,
+ fscache_operation_processor_t processor)
+{
+ op->processor = processor;
+ slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
+}
+
+/*
+ * data read operation
+ */
+struct fscache_retrieval {
+ struct fscache_operation op;
+ struct address_space *mapping; /* netfs pages */
+ fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
+ void *context; /* netfs read context (pinned) */
+ struct list_head to_do; /* list of things to be done by the backend */
+ unsigned long start_time; /* time at which retrieval started */
+};
+
+typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
+ struct page *page,
+ gfp_t gfp);
+
+typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ gfp_t gfp);
+
+/**
+ * fscache_get_retrieval - Get an extra reference on a retrieval operation
+ * @op: The retrieval operation to get a reference on
+ *
+ * Get an extra reference on a retrieval operation.
+ */
+static inline
+struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
+{
+ atomic_inc(&op->op.usage);
+ return op;
+}
+
+/**
+ * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
+ * @op: The retrieval operation affected
+ *
+ * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
+ */
+static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
+{
+ fscache_enqueue_operation(&op->op);
+}
+
+/**
+ * fscache_put_retrieval - Drop a reference to a retrieval operation
+ * @op: The retrieval operation affected
+ *
+ * Drop a reference to a retrieval operation.
+ */
+static inline void fscache_put_retrieval(struct fscache_retrieval *op)
+{
+ fscache_put_operation(&op->op);
+}
+
+/*
+ * cached page storage work item
+ * - used to do three things:
+ * - batch writes to the cache
+ * - do cache writes asynchronously
+ * - defer writes until cache object lookup completion
+ */
+struct fscache_storage {
+ struct fscache_operation op;
+ pgoff_t store_limit; /* don't write more than this */
+};
+
+/*
+ * cache operations
+ */
+struct fscache_cache_ops {
+ /* name of cache provider */
+ const char *name;
+
+ /* allocate an object record for a cookie */
+ struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
+ struct fscache_cookie *cookie);
+
+ /* look up the object for a cookie */
+ void (*lookup_object)(struct fscache_object *object);
+
+ /* finished looking up */
+ void (*lookup_complete)(struct fscache_object *object);
+
+ /* increment the usage count on this object (may fail if unmounting) */
+ struct fscache_object *(*grab_object)(struct fscache_object *object);
+
+ /* pin an object in the cache */
+ int (*pin_object)(struct fscache_object *object);
+
+ /* unpin an object in the cache */
+ void (*unpin_object)(struct fscache_object *object);
+
+ /* store the updated auxiliary data on an object */
+ void (*update_object)(struct fscache_object *object);
+
+ /* discard the resources pinned by an object and effect retirement if
+ * necessary */
+ void (*drop_object)(struct fscache_object *object);
+
+ /* dispose of a reference to an object */
+ void (*put_object)(struct fscache_object *object);
+
+ /* sync a cache */
+ void (*sync_cache)(struct fscache_cache *cache);
+
+ /* notification that the attributes of a non-index object (such as
+ * i_size) have changed */
+ int (*attr_changed)(struct fscache_object *object);
+
+ /* reserve space for an object's data and associated metadata */
+ int (*reserve_space)(struct fscache_object *object, loff_t i_size);
+
+ /* request a backing block for a page be read or allocated in the
+ * cache */
+ fscache_page_retrieval_func_t read_or_alloc_page;
+
+ /* request backing blocks for a list of pages be read or allocated in
+ * the cache */
+ fscache_pages_retrieval_func_t read_or_alloc_pages;
+
+ /* request a backing block for a page be allocated in the cache so that
+ * it can be written directly */
+ fscache_page_retrieval_func_t allocate_page;
+
+ /* request backing blocks for pages be allocated in the cache so that
+ * they can be written directly */
+ fscache_pages_retrieval_func_t allocate_pages;
+
+ /* write a page to its backing block in the cache */
+ int (*write_page)(struct fscache_storage *op, struct page *page);
+
+ /* detach backing block from a page (optional)
+ * - must release the cookie lock before returning
+ * - may sleep
+ */
+ void (*uncache_page)(struct fscache_object *object,
+ struct page *page);
+
+ /* dissociate a cache from all the pages it was backing */
+ void (*dissociate_pages)(struct fscache_cache *cache);
+};
+
+/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ * constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+ atomic_t usage; /* number of users of this cookie */
+ atomic_t n_children; /* number of children of this cookie */
+ spinlock_t lock;
+ struct hlist_head backing_objects; /* object(s) backing this file/index */
+ const struct fscache_cookie_def *def; /* definition */
+ struct fscache_cookie *parent; /* parent of this entry */
+ void *netfs_data; /* back pointer to netfs */
+ struct radix_tree_root stores; /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
+
+ unsigned long flags;
+#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
+#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
+#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
+#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
+};
+
+extern struct fscache_cookie fscache_fsdef_index;
+
+/*
+ * on-disk cache file or index handle
+ */
+struct fscache_object {
+ enum fscache_object_state {
+ FSCACHE_OBJECT_INIT, /* object in initial unbound state */
+ FSCACHE_OBJECT_LOOKING_UP, /* looking up object */
+ FSCACHE_OBJECT_CREATING, /* creating object */
+
+ /* active states */
+ FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
+ FSCACHE_OBJECT_ACTIVE, /* object is usable */
+ FSCACHE_OBJECT_UPDATING, /* object is updating */
+
+ /* terminal states */
+ FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */
+ FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */
+ FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */
+ FSCACHE_OBJECT_RELEASING, /* releasing object */
+ FSCACHE_OBJECT_RECYCLING, /* retiring object */
+ FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
+ FSCACHE_OBJECT_DEAD, /* object is now dead */
+ } state;
+
+ int debug_id; /* debugging ID */
+ int n_children; /* number of child objects */
+ int n_ops; /* number of ops outstanding on object */
+ int n_obj_ops; /* number of object ops outstanding on object */
+ int n_in_progress; /* number of ops in progress */
+ int n_exclusive; /* number of exclusive ops queued */
+ spinlock_t lock; /* state and operations lock */
+
+ unsigned long lookup_jif; /* time at which lookup started */
+ unsigned long event_mask; /* events this object is interested in */
+ unsigned long events; /* events to be processed by this object
+ * (order is important - using fls) */
+#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
+#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
+#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
+#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
+#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
+#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
+#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
+
+ unsigned long flags;
+#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
+#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
+#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
+
+ struct list_head cache_link; /* link in cache->object_list */
+ struct hlist_node cookie_link; /* link in cookie->backing_objects */
+ struct fscache_cache *cache; /* cache that supplied this object */
+ struct fscache_cookie *cookie; /* netfs's file/index object */
+ struct fscache_object *parent; /* parent object */
+ struct slow_work work; /* attention scheduling record */
+ struct list_head dependents; /* FIFO of dependent objects */
+ struct list_head dep_link; /* link in parent's dependents list */
+ struct list_head pending_ops; /* unstarted operations on this object */
+ pgoff_t store_limit; /* current storage limit */
+};
+
+extern const char *fscache_object_states[];
+
+#define fscache_object_is_active(obj) \
+ (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
+ (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
+ (obj)->state < FSCACHE_OBJECT_DYING)
+
+extern const struct slow_work_ops fscache_object_slow_work_ops;
+
+/**
+ * fscache_object_init - Initialise a cache object description
+ * @object: Object description
+ *
+ * Initialise a cache object description to its basic values.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_object_init(struct fscache_object *object,
+ struct fscache_cookie *cookie,
+ struct fscache_cache *cache)
+{
+ atomic_inc(&cache->object_count);
+
+ object->state = FSCACHE_OBJECT_INIT;
+ spin_lock_init(&object->lock);
+ INIT_LIST_HEAD(&object->cache_link);
+ INIT_HLIST_NODE(&object->cookie_link);
+ vslow_work_init(&object->work, &fscache_object_slow_work_ops);
+ INIT_LIST_HEAD(&object->dependents);
+ INIT_LIST_HEAD(&object->dep_link);
+ INIT_LIST_HEAD(&object->pending_ops);
+ object->n_children = 0;
+ object->n_ops = object->n_in_progress = object->n_exclusive = 0;
+ object->events = object->event_mask = 0;
+ object->flags = 0;
+ object->store_limit = 0;
+ object->cache = cache;
+ object->cookie = cookie;
+ object->parent = NULL;
+}
+
+extern void fscache_object_lookup_negative(struct fscache_object *object);
+extern void fscache_obtained_object(struct fscache_object *object);
+
+/**
+ * fscache_object_destroyed - Note destruction of an object in a cache
+ * @cache: The cache from which the object came
+ *
+ * Note the destruction and deallocation of an object record in a cache.
+ */
+static inline void fscache_object_destroyed(struct fscache_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_cache_cleared_wq);
+}
+
+/**
+ * fscache_object_lookup_error - Note an object encountered an error
+ * @object: The object on which the error was encountered
+ *
+ * Note that an object encountered a fatal error (usually an I/O error) and
+ * that it should be withdrawn as soon as possible.
+ */
+static inline void fscache_object_lookup_error(struct fscache_object *object)
+{
+ set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+}
+
+/**
+ * fscache_set_store_limit - Set the maximum size to be stored in an object
+ * @object: The object to set the maximum on
+ * @i_size: The limit to set in bytes
+ *
+ * Set the maximum size an object is permitted to reach, implying the highest
+ * byte that may be written. Intended to be called by the attr_changed() op.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+{
+ object->store_limit = i_size >> PAGE_SHIFT;
+ if (i_size & ~PAGE_MASK)
+ object->store_limit++;
+}
+
+/**
+ * fscache_end_io - End a retrieval operation on a page
+ * @op: The FS-Cache operation covering the retrieval
+ * @page: The page that was to be fetched
+ * @error: The error code (0 if successful)
+ *
+ * Note the end of an operation to retrieve a page, as covered by a particular
+ * operation record.
+ */
+static inline void fscache_end_io(struct fscache_retrieval *op,
+ struct page *page, int error)
+{
+ op->end_io_func(page, op->context, error);
+}
+
+/*
+ * out-of-line cache backend functions
+ */
+extern void fscache_init_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ const char *idfmt,
+ ...) __attribute__ ((format (printf, 3, 4)));
+
+extern int fscache_add_cache(struct fscache_cache *cache,
+ struct fscache_object *fsdef,
+ const char *tagname);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+
+extern void fscache_io_error(struct fscache_cache *cache);
+
+extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
+ struct pagevec *pagevec);
+
+extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+ const void *data,
+ uint16_t datalen);
+
+#endif /* _LINUX_FSCACHE_CACHE_H */
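From a backend's point of view, the interface above amounts to filling in a struct fscache_cache_ops and handing a struct fscache_cache to the core. Below is a minimal sketch of that wiring, using only functions declared in this header; the example_* names are invented and almost all of the mandatory ops are omitted, so treat it as an outline of the registration flow rather than a working cache.

#include <linux/fscache-cache.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct fscache_cache example_cache;

/* Invented stub standing in for a real backend's object allocator. */
static struct fscache_object *example_alloc_object(struct fscache_cache *cache,
						   struct fscache_cookie *cookie)
{
	struct fscache_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		fscache_object_init(obj, cookie, cache);
	return obj;
}

static const struct fscache_cache_ops example_cache_ops = {
	.name		= "examplecache",
	.alloc_object	= example_alloc_object,
	/* a real backend must also provide lookup_object, drop_object,
	 * put_object, the page read/write ops and so on */
};

static int __init example_cache_init(void)
{
	struct fscache_object *fsdef;

	fscache_init_cache(&example_cache, &example_cache_ops, "example-%u", 0);

	/* the backend supplies the object backing the fsdef index */
	fsdef = example_alloc_object(&example_cache, &fscache_fsdef_index);
	if (!fsdef)
		return -ENOMEM;

	return fscache_add_cache(&example_cache, fsdef, "example");
}

static void __exit example_cache_exit(void)
{
	fscache_withdraw_cache(&example_cache);
}

module_init(example_cache_init);
module_exit(example_cache_exit);
MODULE_LICENSE("GPL");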
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
new file mode 100644
index 00000000000..6d8ee466e0a
--- /dev/null
+++ b/include/linux/fscache.h
@@ -0,0 +1,618 @@
+/* General filesystem caching interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ * Documentation/filesystems/caching/netfs-api.txt
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_H
+#define _LINUX_FSCACHE_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+
+#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define fscache_available() (1)
+#define fscache_cookie_valid(cookie) (cookie)
+#else
+#define fscache_available() (0)
+#define fscache_cookie_valid(cookie) (0)
+#endif
+
+
+/*
+ * overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define PageFsCache(page) PagePrivate2((page))
+#define SetPageFsCache(page) SetPagePrivate2((page))
+#define ClearPageFsCache(page) ClearPagePrivate2((page))
+#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+
+/* pattern used to fill dead space in an index entry */
+#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
+
+struct pagevec;
+struct fscache_cache_tag;
+struct fscache_cookie;
+struct fscache_netfs;
+
+typedef void (*fscache_rw_complete_t)(struct page *page,
+ void *context,
+ int error);
+
+/* result of index entry consultation */
+enum fscache_checkaux {
+ FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
+ FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
+ FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
+};
+
+/*
+ * fscache cookie definition
+ */
+struct fscache_cookie_def {
+ /* name of cookie type */
+ char name[16];
+
+ /* cookie type */
+ uint8_t type;
+#define FSCACHE_COOKIE_TYPE_INDEX 0
+#define FSCACHE_COOKIE_TYPE_DATAFILE 1
+
+ /* select the cache into which to insert an entry in this index
+ * - optional
+ * - should return a cache identifier or NULL to cause the cache to be
+ * inherited from the parent if possible or the first cache picked
+ * for a non-index file if not
+ */
+ struct fscache_cache_tag *(*select_cache)(
+ const void *parent_netfs_data,
+ const void *cookie_netfs_data);
+
+ /* get an index key
+ * - should store the key data in the buffer
+ * - should return the amount of data stored
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ * presented
+ */
+ uint16_t (*get_key)(const void *cookie_netfs_data,
+ void *buffer,
+ uint16_t bufmax);
+
+ /* get certain file attributes from the netfs data
+ * - this function can be absent for an index
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ * presented
+ */
+ void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
+
+ /* get the auxiliary data from netfs data
+ * - this function can be absent if the index carries no state data
+ * - should store the auxiliary data in the buffer
+ * - should return the amount of data stored
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ * presented
+ */
+ uint16_t (*get_aux)(const void *cookie_netfs_data,
+ void *buffer,
+ uint16_t bufmax);
+
+ /* consult the netfs about the state of an object
+ * - this function can be absent if the index carries no state data
+ * - the netfs data from the cookie being used as the target is
+ * presented, as is the auxiliary data
+ */
+ enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen);
+
+ /* get an extra reference on a read context
+ * - this function can be absent if the completion function doesn't
+ * require a context
+ */
+ void (*get_context)(void *cookie_netfs_data, void *context);
+
+ /* release an extra reference on a read context
+ * - this function can be absent if the completion function doesn't
+ * require a context
+ */
+ void (*put_context)(void *cookie_netfs_data, void *context);
+
+ /* indicate pages that now have cache metadata retained
+ * - this function should mark the specified pages as now being cached
+ * - the pages will have been marked with PG_fscache before this is
+ * called, so this is optional
+ */
+ void (*mark_pages_cached)(void *cookie_netfs_data,
+ struct address_space *mapping,
+ struct pagevec *cached_pvec);
+
+ /* indicate the cookie is no longer cached
+ * - this function is called when the backing store currently caching
+ * a cookie is removed
+ * - the netfs should use this to clean up any markers indicating
+ * cached pages
+ * - this is mandatory for any object that may have data
+ */
+ void (*now_uncached)(void *cookie_netfs_data);
+};
+
+/*
+ * fscache cached network filesystem type
+ * - name, version and ops must be filled in before registration
+ * - all other fields will be set during registration
+ */
+struct fscache_netfs {
+ uint32_t version; /* indexing version */
+ const char *name; /* filesystem name */
+ struct fscache_cookie *primary_index;
+ struct list_head link; /* internal link */
+};
+
+/*
+ * slow-path functions for when there is actually caching available, and the
+ * netfs does actually have a valid token
+ * - these are not to be called directly
+ * - these are undefined symbols when FS-Cache is not configured and the
+ * optimiser takes care of not using them
+ */
+extern int __fscache_register_netfs(struct fscache_netfs *);
+extern void __fscache_unregister_netfs(struct fscache_netfs *);
+extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
+extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+
+extern struct fscache_cookie *__fscache_acquire_cookie(
+ struct fscache_cookie *,
+ const struct fscache_cookie_def *,
+ void *);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+extern void __fscache_update_cookie(struct fscache_cookie *);
+extern int __fscache_attr_changed(struct fscache_cookie *);
+extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
+ struct page *,
+ fscache_rw_complete_t,
+ void *,
+ gfp_t);
+extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
+ struct address_space *,
+ struct list_head *,
+ unsigned *,
+ fscache_rw_complete_t,
+ void *,
+ gfp_t);
+extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
+extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
+extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
+extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
+extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
+
+/**
+ * fscache_register_netfs - Register a filesystem as desiring caching services
+ * @netfs: The description of the filesystem
+ *
+ * Register a filesystem as desiring caching services if they're available.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_register_netfs(struct fscache_netfs *netfs)
+{
+ if (fscache_available())
+ return __fscache_register_netfs(netfs);
+ else
+ return 0;
+}
+
+/**
+ * fscache_unregister_netfs - Indicate that a filesystem no longer desires
+ * caching services
+ * @netfs: The description of the filesystem
+ *
+ * Indicate that a filesystem no longer desires caching services for the
+ * moment.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_unregister_netfs(struct fscache_netfs *netfs)
+{
+ if (fscache_available())
+ __fscache_unregister_netfs(netfs);
+}
+
+/**
+ * fscache_lookup_cache_tag - Look up a cache tag
+ * @name: The name of the tag to search for
+ *
+ * Acquire a specific cache referral tag that can be used to select a specific
+ * cache in which to cache an index.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+{
+ if (fscache_available())
+ return __fscache_lookup_cache_tag(name);
+ else
+ return NULL;
+}
+
+/**
+ * fscache_release_cache_tag - Release a cache tag
+ * @tag: The tag to release
+ *
+ * Release a reference to a cache referral tag previously looked up.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+{
+ if (fscache_available())
+ __fscache_release_cache_tag(tag);
+}
+
+/**
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @parent: The cookie that's to be the parent of this one
+ * @def: A description of the cache object, including callback operations
+ * @netfs_data: An arbitrary piece of data to be kept in the cookie to
+ * represent the cache object to the netfs
+ *
+ * This function is used to inform FS-Cache about part of an index hierarchy
+ * that can be used to locate files. This is done by requesting a cookie for
+ * each index in the path to the file.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+struct fscache_cookie *fscache_acquire_cookie(
+ struct fscache_cookie *parent,
+ const struct fscache_cookie_def *def,
+ void *netfs_data)
+{
+ if (fscache_cookie_valid(parent))
+ return __fscache_acquire_cookie(parent, def, netfs_data);
+ else
+ return NULL;
+}
+
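On the netfs side, the intended sequence is to register a struct fscache_netfs once and then acquire cookies under its primary index for each object to be cached. Here is a minimal sketch under that assumption; the exnetfs_* names and the 32-bit file id used as the index key are invented for illustration.

#include <linux/fscache.h>
#include <linux/string.h>

/* Invented key generator: keys each cached file by a 32-bit id. */
static uint16_t exnetfs_get_key(const void *cookie_netfs_data, void *buffer,
				uint16_t bufmax)
{
	const uint32_t *id = cookie_netfs_data;

	if (bufmax < sizeof(*id))
		return 0;
	memcpy(buffer, id, sizeof(*id));
	return sizeof(*id);		/* number of key bytes stored */
}

static const struct fscache_cookie_def exnetfs_file_def = {
	.name		= "exnetfs.file",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= exnetfs_get_key,
};

static struct fscache_netfs exnetfs_cache_netfs = {
	.name		= "exnetfs",
	.version	= 0,
};

/* Called once, e.g. from module init. */
static int exnetfs_cache_register(void)
{
	return fscache_register_netfs(&exnetfs_cache_netfs);
}

/* Per file: hang a data cookie off the netfs's primary index. */
static struct fscache_cookie *exnetfs_cache_get_cookie(uint32_t *file_id)
{
	return fscache_acquire_cookie(exnetfs_cache_netfs.primary_index,
				      &exnetfs_file_def, file_id);
}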
+/**
+ * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
+ * it
+ * @cookie: The cookie being returned
+ * @retire: True if the cache object the cookie represents is to be discarded
+ *
+ * This function returns a cookie to the cache, forcibly discarding the
+ * associated cache object if retire is set to true.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_relinquish_cookie(cookie, retire);
+}
+
+/**
+ * fscache_update_cookie - Request that a cache object be updated
+ * @cookie: The cookie representing the cache object
+ *
+ * Request an update of the index data for the cache object associated with the
+ * cookie.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_update_cookie(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_update_cookie(cookie);
+}
+
+/**
+ * fscache_pin_cookie - Pin a data-storage cache object in its cache
+ * @cookie: The cookie representing the cache object
+ *
+ * Permit data-storage cache objects to be pinned in the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_pin_cookie(struct fscache_cookie *cookie)
+{
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_unpin_cookie - Unpin a data-storage cache object in its cache
+ * @cookie: The cookie representing the cache object
+ *
+ * Permit data-storage cache objects to be unpinned from the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_unpin_cookie(struct fscache_cookie *cookie)
+{
+}
+
+/**
+ * fscache_attr_changed - Notify cache that an object's attributes changed
+ * @cookie: The cookie representing the cache object
+ *
+ * Send a notification to the cache indicating that an object's attributes have
+ * changed. This includes the data size. These attributes will be obtained
+ * through the get_attr() cookie definition op.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_attr_changed(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_attr_changed(cookie);
+ else
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_reserve_space - Reserve data space for a cached object
+ * @cookie: The cookie representing the cache object
+ * @size: The amount of space to be reserved
+ *
+ * Reserve an amount of space in the cache for the cache object attached to a
+ * cookie so that a write to that object within the space can always be
+ * honoured.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+{
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
+ * in which to store it
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to fill if possible
+ * @end_io_func: The callback to invoke when and if the page is filled
+ * @context: An arbitrary piece of data to pass on to end_io_func()
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Read a page from the cache, or if that's not possible make a potential
+ * one-block reservation in the cache into which the page may be stored once
+ * fetched from the server.
+ *
+ * If the page is not backed by the cache object, or if there's some reason
+ * it can't be, -ENOBUFS will be returned and nothing more will be done for
+ * that page.
+ *
+ * Else, if that page is backed by the cache, a read will be initiated directly
+ * to the netfs's page and 0 will be returned by this function. The
+ * end_io_func() callback will be invoked when the operation terminates on a
+ * completion or failure. Note that the callback may be invoked before the
+ * return.
+ *
+ * Else, if the page is unbacked, -ENODATA is returned and a block may have
+ * been allocated in the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ struct page *page,
+ fscache_rw_complete_t end_io_func,
+ void *context,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_read_or_alloc_page(cookie, page, end_io_func,
+ context, gfp);
+ else
+ return -ENOBUFS;
+}
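
A sketch of the three-way return handling described above, reusing the hypothetical my_fs_inode from the earlier sketch. It assumes the fscache_rw_complete_t callback declared earlier in this header takes (page, context, error), and my_fs_readpage_from_server() stands in for the netfs's normal read path.

#include <linux/fscache.h>
#include <linux/pagemap.h>

/* Hypothetical: issue an ordinary read to the server for this page. */
int my_fs_readpage_from_server(struct my_fs_inode *mi, struct page *page);

/* Completion callback run when the cache finishes filling the page. */
static void my_fs_read_complete(struct page *page, void *context, int error)
{
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int my_fs_readpage(struct my_fs_inode *mi, struct page *page)
{
	int ret;

	ret = fscache_read_or_alloc_page(mi->cache_cookie, page,
					 my_fs_read_complete, NULL,
					 GFP_KERNEL);
	if (ret == 0)		/* cache-backed; completion runs asynchronously */
		return 0;

	/* -ENODATA: a cache block may now be allocated; -ENOBUFS: not
	 * cacheable right now.  Either way, fetch from the server. */
	return my_fs_readpage_from_server(mi, page);
}
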
+
+/**
+ * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
+ * blocks in which to store them
+ * @cookie: The cookie representing the cache object
+ * @mapping: The netfs inode mapping to which the pages will be attached
+ * @pages: A list of potential netfs pages to be filled
+ * @end_io_func: The callback to invoke when and if each page is filled
+ * @context: An arbitrary piece of data to pass on to end_io_func()
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Read a set of pages from the cache, or if that's not possible, attempt to
+ * make a potential one-block reservation for each page in the cache into which
+ * that page may be stored once fetched from the server.
+ *
+ * If some pages are not backed by the cache object, or if there's some
+ * reason they can't be, -ENOBUFS will be returned and nothing more will be
+ * done for those pages.
+ *
+ * Else, if some of the pages are backed by the cache, a read will be initiated
+ * directly to the netfs's page and 0 will be returned by this function. The
+ * end_io_func() callback will be invoked when the operation terminates on a
+ * completion or failure. Note that the callback may be invoked before the
+ * return.
+ *
+ * Else, if a page is unbacked, -ENODATA is returned and a block may have
+ * been allocated in the cache.
+ *
+ * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
+ * regard to different pages, the return values are prioritised in that order.
+ * Any pages submitted for reading are removed from the pages list.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ fscache_rw_complete_t end_io_func,
+ void *context,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_read_or_alloc_pages(cookie, mapping, pages,
+ nr_pages, end_io_func,
+ context, gfp);
+ else
+ return -ENOBUFS;
+}
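
For the multi-page variant, the important consequence of the prioritised returns is that pages the cache takes on are removed from the list, so whatever remains (tracked by *nr_pages) still has to be fetched normally. A sketch, again with hypothetical my_fs_* helpers and reusing my_fs_read_complete() from the previous sketch:

/* Hypothetical: read every page still on the list from the server. */
int my_fs_read_list_from_server(struct my_fs_inode *mi,
				struct address_space *mapping,
				struct list_head *pages, unsigned nr_pages);

static int my_fs_readpages(struct my_fs_inode *mi,
			   struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	int ret;

	ret = fscache_read_or_alloc_pages(mi->cache_cookie, mapping, pages,
					  &nr_pages, my_fs_read_complete,
					  NULL, GFP_KERNEL);
	if (ret == 0 && nr_pages == 0)
		return 0;	/* the cache took responsibility for everything */

	/* Whatever the return value, the nr_pages pages left on the list were
	 * not submitted to the cache and must be read from the server. */
	return my_fs_read_list_from_server(mi, mapping, pages, nr_pages);
}
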
+
+/**
+ * fscache_alloc_page - Allocate a block in which to store a page
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to allocate a page for
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Request allocation of a block in the cache in which to store a netfs page
+ * without retrieving any contents from the cache.
+ *
+ * If the page is not backed by a file then -ENOBUFS will be returned and
+ * nothing more will be done, and no reservation will be made.
+ *
+ * Else, a block will be allocated if one wasn't already, and 0 will be
+ * returned.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_alloc_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_alloc_page(cookie, page, gfp);
+ else
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_write_page - Request storage of a page in the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to store
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Request the contents of the netfs page be written into the cache. This
+ * request may be ignored if no cache block is currently allocated, in which
+ * case it will return -ENOBUFS.
+ *
+ * If a cache block was already allocated, a write will be initiated and 0 will
+ * be returned. The PG_fscache_write page bit is set immediately and will then
+ * be cleared at the completion of the write to indicate the success or failure
+ * of the operation. Note that the completion may happen before the return.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_write_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_write_page(cookie, page, gfp);
+ else
+ return -ENOBUFS;
+}
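
A sketch of the store side: once a page has been fetched from the server, offer it to the cache and treat -ENOBUFS as "not cached" rather than as a failure. The error branch simply drops the page from the cache with fscache_uncache_page(), defined below; the my_fs_* names are hypothetical.

static void my_fs_store_page_in_cache(struct my_fs_inode *mi, struct page *page)
{
	int ret;

	ret = fscache_write_page(mi->cache_cookie, page, GFP_KERNEL);
	if (ret == -ENOBUFS)
		return;				/* no cache block; nothing to do */
	if (ret < 0)
		fscache_uncache_page(mi->cache_cookie, page);
}
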
+
+/**
+ * fscache_uncache_page - Indicate that caching is no longer required on a page
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that was being cached.
+ *
+ * Tell the cache that we no longer want a page to be cached and that it should
+ * remove any knowledge of the netfs page it may have.
+ *
+ * Note that this cannot cancel any outstanding I/O operations between this
+ * page and the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_uncache_page(struct fscache_cookie *cookie,
+ struct page *page)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_uncache_page(cookie, page);
+}
+
+/**
+ * fscache_check_page_write - Ask if a page is being written to the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ *
+ * Ask the cache if a page is being written to the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+bool fscache_check_page_write(struct fscache_cookie *cookie,
+ struct page *page)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_check_page_write(cookie, page);
+ return false;
+}
+
+/**
+ * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ *
+ * Ask the cache to wake us up when a page is no longer being written to the
+ * cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_wait_on_page_write(struct fscache_cookie *cookie,
+ struct page *page)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_wait_on_page_write(cookie, page);
+}
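
Taken together, the last three helpers support a releasepage-style path: a page must not be recycled while the cache is still writing it out. A sketch follows, with the __GFP_WAIT test standing in for "the caller may sleep" (an assumption about the caller, not something this header defines):

static int my_fs_release_page(struct my_fs_inode *mi, struct page *page,
			      gfp_t gfp)
{
	if (fscache_check_page_write(mi->cache_cookie, page)) {
		if (!(gfp & __GFP_WAIT))
			return 0;	/* cannot sleep here; keep the page */
		fscache_wait_on_page_write(mi->cache_cookie, page);
	}

	fscache_uncache_page(mi->cache_cookie, page);
	return 1;			/* safe to release the page now */
}
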
+
+#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index d9051d717d2..43fc95d822d 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -18,7 +18,6 @@
#define _FSL_DEVICE_H_
#include <linux/types.h>
-#include <linux/phy.h>
/*
* Some conventions on how we handle peripherals on Freescale chips
@@ -44,31 +43,6 @@
*
*/
-struct gianfar_platform_data {
- /* device specific information */
- u32 device_flags;
- char bus_id[BUS_ID_SIZE];
- phy_interface_t interface;
-};
-
-struct gianfar_mdio_data {
- /* board specific information */
- int irq[32];
-};
-
-/* Flags in gianfar_platform_data */
-#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
-#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */
-
-struct fsl_i2c_platform_data {
- /* device specific information */
- u32 device_flags;
-};
-
-/* Flags related to I2C device features */
-#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
-#define FSL_I2C_DEV_CLOCK_5200 0x00000002
-
enum fsl_usb2_operating_modes {
FSL_USB2_MPH_HOST,
FSL_USB2_DR_HOST,
@@ -95,14 +69,15 @@ struct fsl_usb2_platform_data {
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
+struct spi_device;
+
struct fsl_spi_platform_data {
u32 initial_spmode; /* initial SPMODE value */
- u16 bus_num;
+ s16 bus_num;
bool qe_mode;
/* board specific information */
u16 max_chipselect;
- void (*activate_cs)(u8 cs, u8 polarity);
- void (*deactivate_cs)(u8 cs, u8 polarity);
+ void (*cs_control)(struct spi_device *spi, bool on);
u32 sysclk;
};
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 00fbd5b245c..936f9aa8bb9 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -13,6 +13,7 @@
#include <linux/dnotify.h>
#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
/*
@@ -22,19 +23,45 @@
static inline void fsnotify_d_instantiate(struct dentry *entry,
struct inode *inode)
{
+ __fsnotify_d_instantiate(entry, inode);
+
inotify_d_instantiate(entry, inode);
}
+/* Notify this dentry's parent about a child's events. */
+static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
+{
+ __fsnotify_parent(dentry, mask);
+
+ inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
+}
+
/*
* fsnotify_d_move - entry has been moved
* Called with dcache_lock and entry->d_lock held.
*/
static inline void fsnotify_d_move(struct dentry *entry)
{
+ /*
+ * On move we need to update entry->d_flags to indicate if the new parent
+ * cares about events from this entry.
+ */
+ __fsnotify_update_dcache_flags(entry);
+
inotify_d_move(entry);
}
/*
+ * fsnotify_link_count - inode's link count changed
+ */
+static inline void fsnotify_link_count(struct inode *inode)
+{
+ inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
+
+ fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+/*
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
@@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
- u32 cookie = inotify_get_cookie();
+ u32 in_cookie = inotify_get_cookie();
+ u32 fs_cookie = fsnotify_get_cookie();
+ __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
+ __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
if (old_dir == new_dir)
- inode_dir_notify(old_dir, DN_RENAME);
- else {
- inode_dir_notify(old_dir, DN_DELETE);
- inode_dir_notify(new_dir, DN_CREATE);
- }
+ old_dir_mask |= FS_DN_RENAME;
- if (isdir)
+ if (isdir) {
isdir = IN_ISDIR;
- inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name,
+ old_dir_mask |= FS_IN_ISDIR;
+ new_dir_mask |= FS_IN_ISDIR;
+ }
+
+ inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
source);
- inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name,
+ inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
source);
+ fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
+ fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
+
if (target) {
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(target);
+
+ /* this is really a link_count change not a removal */
+ fsnotify_link_count(target);
}
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
+ fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
audit_inode_child(new_name, moved, new_dir);
}
/*
+ * fsnotify_inode_delete - an inode is being evicted from the cache; clean up is needed
+ */
+static inline void fsnotify_inode_delete(struct inode *inode)
+{
+ __fsnotify_inode_delete(inode);
+}
+
+/*
* fsnotify_nameremove - a filename was removed from a directory
*/
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
{
+ __u32 mask = FS_DELETE;
+
if (isdir)
- isdir = IN_ISDIR;
- dnotify_parent(dentry, DN_DELETE);
- inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name);
+ mask |= FS_IN_ISDIR;
+
+ fsnotify_parent(dentry, mask);
}
/*
@@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(inode);
-}
-/*
- * fsnotify_link_count - inode's link count changed
- */
-static inline void fsnotify_link_count(struct inode *inode)
-{
- inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
+ fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ __fsnotify_inode_delete(inode);
}
/*
@@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode)
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
- inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
+
+ fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
/*
@@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
- inode_dir_notify(dir, DN_CREATE);
inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
inode);
fsnotify_link_count(inode);
audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
+
+ fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
}
/*
@@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
*/
static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
{
- inode_dir_notify(inode, DN_CREATE);
- inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
- dentry->d_name.name, dentry->d_inode);
+ __u32 mask = (FS_CREATE | FS_IN_ISDIR);
+ struct inode *d_inode = dentry->d_inode;
+
+ inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
+
+ fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
/*
@@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_ACCESS;
+ __u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- dnotify_parent(dentry, DN_ACCESS);
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry)
static inline void fsnotify_modify(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_MODIFY;
+ __u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- dnotify_parent(dentry, DN_MODIFY);
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry)
static inline void fsnotify_open(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_OPEN;
+ __u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
- const char *name = dentry->d_name.name;
fmode_t mode = file->f_mode;
- u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
+ __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- inotify_dentry_parent_queue_event(dentry, mask, 0, name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
/*
@@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file)
static inline void fsnotify_xattr(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_ATTRIB;
+ __u32 mask = FS_ATTRIB;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry)
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
struct inode *inode = dentry->d_inode;
- int dn_mask = 0;
- u32 in_mask = 0;
+ __u32 mask = 0;
+
+ if (ia_valid & ATTR_UID)
+ mask |= FS_ATTRIB;
+ if (ia_valid & ATTR_GID)
+ mask |= FS_ATTRIB;
+ if (ia_valid & ATTR_SIZE)
+ mask |= FS_MODIFY;
- if (ia_valid & ATTR_UID) {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- }
- if (ia_valid & ATTR_GID) {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- }
- if (ia_valid & ATTR_SIZE) {
- in_mask |= IN_MODIFY;
- dn_mask |= DN_MODIFY;
- }
/* both times implies a utime(s) call */
if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
- {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- } else if (ia_valid & ATTR_ATIME) {
- in_mask |= IN_ACCESS;
- dn_mask |= DN_ACCESS;
- } else if (ia_valid & ATTR_MTIME) {
- in_mask |= IN_MODIFY;
- dn_mask |= DN_MODIFY;
- }
- if (ia_valid & ATTR_MODE) {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- }
+ mask |= FS_ATTRIB;
+ else if (ia_valid & ATTR_ATIME)
+ mask |= FS_ACCESS;
+ else if (ia_valid & ATTR_MTIME)
+ mask |= FS_MODIFY;
+
+ if (ia_valid & ATTR_MODE)
+ mask |= FS_ATTRIB;
- if (dn_mask)
- dnotify_parent(dentry, dn_mask);
- if (in_mask) {
+ if (mask) {
if (S_ISDIR(inode->i_mode))
- in_mask |= IN_ISDIR;
- inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL);
- inotify_dentry_parent_queue_event(dentry, in_mask, 0,
- dentry->d_name.name);
+ mask |= FS_IN_ISDIR;
+ inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
-#ifdef CONFIG_INOTIFY /* inotify helpers */
+#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */
/*
* fsnotify_oldname_init - save off the old filename before we change it
@@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name)
kfree(old_name);
}
-#else /* CONFIG_INOTIFY */
+#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
static inline const char *fsnotify_oldname_init(const char *name)
{
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
new file mode 100644
index 00000000000..44848aa830d
--- /dev/null
+++ b/include/linux/fsnotify_backend.h
@@ -0,0 +1,387 @@
+/*
+ * Filesystem access notification for Linux
+ *
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ */
+
+#ifndef __LINUX_FSNOTIFY_BACKEND_H
+#define __LINUX_FSNOTIFY_BACKEND_H
+
+#ifdef __KERNEL__
+
+#include <linux/idr.h> /* inotify uses this */
+#include <linux/fs.h> /* struct inode */
+#include <linux/list.h>
+#include <linux/path.h> /* struct path */
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+
+/*
+ * IN_* from inotify.h line up EXACTLY with FS_* so that we can easily
+ * convert between them. dnotify only needs conversion at watch creation,
+ * so there is no perf loss there. fanotify isn't defined yet, so it can use
+ * the holes if it needs more events.
+ */
+#define FS_ACCESS 0x00000001 /* File was accessed */
+#define FS_MODIFY 0x00000002 /* File was modified */
+#define FS_ATTRIB 0x00000004 /* Metadata changed */
+#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
+#define FS_OPEN 0x00000020 /* File was opened */
+#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
+#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
+#define FS_CREATE 0x00000100 /* Subfile was created */
+#define FS_DELETE 0x00000200 /* Subfile was deleted */
+#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
+#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+
+#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
+#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
+
+#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
+#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+
+#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+
+/* This inode cares about things that happen to its children. Always set for
+ * dnotify and inotify. */
+#define FS_EVENT_ON_CHILD 0x08000000
+
+/* This is a list of all events that may get sent to a parent based on fs events
+ * happening to inodes inside that directory */
+#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
+ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
+ FS_DELETE)
+
+/* listeners that hard code group numbers near the top */
+#define DNOTIFY_GROUP_NUM UINT_MAX
+#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
+
+struct fsnotify_group;
+struct fsnotify_event;
+struct fsnotify_mark_entry;
+struct fsnotify_event_private_data;
+
+/*
+ * Each group must define these ops. The fsnotify infrastructure will call
+ * these operations for each relevant group.
+ *
+ * should_send_event - given a group, inode, and mask this function determines
+ * if the group is interested in this event.
+ * handle_event - main call for a group to handle an fs event
+ * free_group_priv - called when a group refcnt hits 0 to clean up the private union
+ * freeing_mark - this means that a mark has been flagged to die when everything
+ * finishes using it. The function is supplied with what must be a
+ * valid group and inode to use to clean up.
+ */
+struct fsnotify_ops {
+ bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
+ int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
+ void (*free_group_priv)(struct fsnotify_group *group);
+ void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
+ void (*free_event_priv)(struct fsnotify_event_private_data *priv);
+};
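
A sketch of the smallest plausible backend: fill in the ops, then take a group with fsnotify_obtain_group(), which is declared further down in this header. The "mynotify" names and the group number are made up, and the ERR_PTR() failure convention is assumed rather than stated here.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/fsnotify_backend.h>

static bool mynotify_should_send_event(struct fsnotify_group *group,
				       struct inode *inode, __u32 mask)
{
	return true;		/* rely purely on the group and inode masks */
}

static int mynotify_handle_event(struct fsnotify_group *group,
				 struct fsnotify_event *event)
{
	/* queue the event for a userspace reader; no per-event private data */
	return fsnotify_add_notify_event(group, event, NULL);
}

static const struct fsnotify_ops mynotify_ops = {
	.should_send_event	= mynotify_should_send_event,
	.handle_event		= mynotify_handle_event,
};

static struct fsnotify_group *mynotify_group;

static int __init mynotify_init(void)
{
	mynotify_group = fsnotify_obtain_group(INOTIFY_GROUP_NUM - 1,
					       FS_CREATE | FS_DELETE,
					       &mynotify_ops);
	if (IS_ERR(mynotify_group))
		return PTR_ERR(mynotify_group);
	return 0;
}
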
+
+/*
+ * A group is a "thing" that wants to receive notification about filesystem
+ * events. The mask holds the subset of event types this group cares about.
+ * refcnt on a group is up to the implementor and at any moment if it goes to 0
+ * everything will be cleaned up.
+ */
+struct fsnotify_group {
+ /*
+ * global list of all groups receiving events from fsnotify.
+ * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
+ * or fsnotify_grp_srcu depending on write vs read.
+ */
+ struct list_head group_list;
+
+ /*
+ * Defines all of the event types in which this group is interested.
+ * This mask is a bitwise OR of the FS_* events from above. Each time
+ * this mask changes for a group (if it changes) the correct functions
+ * must be called to update the global structures which indicate global
+ * interest in event types.
+ */
+ __u32 mask;
+
+ /*
+ * How the refcnt is used is up to each group. When the refcnt hits 0
+ * fsnotify will clean up all of the resources associated with this group.
+ * As an example, the dnotify group will always have a refcnt=1 and that
+ * will never change. Inotify, on the other hand, has a group per
+ * inotify_init() and the refcnt will hit 0 only when that fd has been
+ * closed.
+ */
+ atomic_t refcnt; /* things with interest in this group */
+ unsigned int group_num; /* simply prevents accidental group collision */
+
+ const struct fsnotify_ops *ops; /* how this group handles things */
+
+ /* needed to send notification to userspace */
+ struct mutex notification_mutex; /* protect the notification_list */
+ struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
+ wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
+ unsigned int q_len; /* events on the queue */
+ unsigned int max_events; /* maximum events allowed on the list */
+
+ /* stores all fastpath entries associated with this group so they can be cleaned on unregister */
+ spinlock_t mark_lock; /* protect mark_entries list */
+ atomic_t num_marks; /* 1 for each mark entry and 1 for not being
+ * past the point of no return when freeing
+ * a group */
+ struct list_head mark_entries; /* all inode mark entries for this group */
+
+ /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
+ bool on_group_list;
+
+ /* groups can define private fields here or use the void *private */
+ union {
+ void *private;
+#ifdef CONFIG_INOTIFY_USER
+ struct inotify_group_private_data {
+ spinlock_t idr_lock;
+ struct idr idr;
+ u32 last_wd;
+ struct fasync_struct *fa; /* async notification */
+ struct user_struct *user;
+ } inotify_data;
+#endif
+ };
+};
+
+/*
+ * A single event can be queued in multiple group->notification_lists.
+ *
+ * each group->notification_list will point to an event_holder which in turn points
+ * to the actual event that needs to be sent to userspace.
+ *
+ * Seemed cheaper to create a refcnt'd event and a small holder for every group
+ * than to create a different event for every group.
+ *
+ */
+struct fsnotify_event_holder {
+ struct fsnotify_event *event;
+ struct list_head event_list;
+};
+
+/*
+ * Inotify needs to tack data onto an event. This struct lets us later find the
+ * correct private data of the correct group.
+ */
+struct fsnotify_event_private_data {
+ struct fsnotify_group *group;
+ struct list_head event_list;
+};
+
+/*
+ * all of the information about the original object that we now want to send to
+ * a group. If you want to carry more info from the accessing task to the
+ * listener this structure is where you need to be adding fields.
+ */
+struct fsnotify_event {
+ /*
+ * If we create an event we are also likely going to need a holder
+ * to link to a group. So embed one holder in the event. Means only
+ * one allocation for the common case where we only have one group
+ */
+ struct fsnotify_event_holder holder;
+ spinlock_t lock; /* protection for the associated event_holder and private_list */
+ /* to_tell may ONLY be dereferenced during handle_event(). */
+ struct inode *to_tell; /* either the inode the event happened to or its parent */
+ /*
+ * depending on the event type we should have either a path or inode
+ * We hold a reference on path, but NOT on inode. Since we have the ref on
+ * the path, it may be dereferenced at any point during this object's
+ * lifetime. That reference is dropped when this object's refcnt hits
+ * 0. If this event contains an inode instead of a path, the inode may
+ * ONLY be used during handle_event().
+ */
+ union {
+ struct path path;
+ struct inode *inode;
+ };
+/* when calling fsnotify tell it if the data is a path or inode */
+#define FSNOTIFY_EVENT_NONE 0
+#define FSNOTIFY_EVENT_PATH 1
+#define FSNOTIFY_EVENT_INODE 2
+#define FSNOTIFY_EVENT_FILE 3
+ int data_type; /* which of the above union we have */
+ atomic_t refcnt; /* how many groups still are using/need to send this event */
+ __u32 mask; /* the type of access, bitwise OR for FS_* event types */
+
+ u32 sync_cookie; /* used to correlate events, namely inotify mv events */
+ char *file_name;
+ size_t name_len;
+
+ struct list_head private_data_list; /* groups can store private data here */
+};
+
+/*
+ * a mark is simply an entry attached to an in core inode which allows an
+ * fsnotify listener to indicate they are either no longer interested in events
+ * of a type matching mask or only interested in those events.
+ *
+ * these are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access). Some fsnotify users
+ * (such as dnotify) will flush these when the open fd is closed and not at
+ * inode eviction or modification.
+ */
+struct fsnotify_mark_entry {
+ __u32 mask; /* mask this mark entry is for */
+ /* we hold ref for each i_list and g_list. also one ref for each 'thing'
+ * in kernel that found and may be using this mark. */
+ atomic_t refcnt; /* active things looking at this mark */
+ struct inode *inode; /* inode this entry is associated with */
+ struct fsnotify_group *group; /* group this mark entry is for */
+ struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
+ struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */
+ spinlock_t lock; /* protect group, inode, and killme */
+ struct list_head free_i_list; /* tmp list used when freeing this mark */
+ struct list_head free_g_list; /* tmp list used when freeing this mark */
+ void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
+};
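
A sketch of putting a mark on an inode: allocate it, initialise it with a destructor for the final put, set the mask, and attach it with fsnotify_add_mark(), declared below. Error handling is trimmed to the essentials and the my_* helper names are hypothetical.

#include <linux/slab.h>

static void my_free_mark(struct fsnotify_mark_entry *entry)
{
	kfree(entry);		/* called once the last reference is put */
}

static int my_watch_inode(struct fsnotify_group *group,
			  struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	fsnotify_init_mark(entry, my_free_mark);
	entry->mask = mask;

	ret = fsnotify_add_mark(entry, group, inode);
	if (ret)
		fsnotify_put_mark(entry);	/* drop our allocation reference */
	return ret;
}
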
+
+#ifdef CONFIG_FSNOTIFY
+
+/* called from the vfs helpers */
+
+/* main fsnotify call to send events */
+extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const char *name, u32 cookie);
+extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
+extern void __fsnotify_inode_delete(struct inode *inode);
+extern u32 fsnotify_get_cookie(void);
+
+static inline int fsnotify_inode_watches_children(struct inode *inode)
+{
+ /* FS_EVENT_ON_CHILD is set if the inode may care */
+ if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /* this inode might care about child events, does it care about the
+ * specific set of events that can happen on a child? */
+ return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+}
+
+/*
+ * Update the dentry with a flag indicating the interest of its parent to receive
+ * filesystem events when those events happen to this dentry->d_inode.
+ */
+static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+{
+ struct dentry *parent;
+
+ assert_spin_locked(&dcache_lock);
+ assert_spin_locked(&dentry->d_lock);
+
+ parent = dentry->d_parent;
+ if (fsnotify_inode_watches_children(parent->d_inode))
+ dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ else
+ dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
+}
+
+/*
+ * fsnotify_d_instantiate - instantiate a dentry for inode
+ * Called with dcache_lock held.
+ */
+static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
+ if (!inode)
+ return;
+
+ assert_spin_locked(&dcache_lock);
+
+ spin_lock(&dentry->d_lock);
+ __fsnotify_update_dcache_flags(dentry);
+ spin_unlock(&dentry->d_lock);
+}
+
+/* called from fsnotify listeners, such as fanotify or dnotify */
+
+/* must call when a group changes its ->mask */
+extern void fsnotify_recalc_global_mask(void);
+/* get a reference to an existing or create a new group */
+extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
+ __u32 mask,
+ const struct fsnotify_ops *ops);
+/* run all marks associated with this group and update group->mask */
+extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
+/* drop reference on a group from fsnotify_obtain_group */
+extern void fsnotify_put_group(struct fsnotify_group *group);
+
+/* take a reference to an event */
+extern void fsnotify_get_event(struct fsnotify_event *event);
+extern void fsnotify_put_event(struct fsnotify_event *event);
+/* find private data previously attached to an event and unlink it */
+extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group,
+ struct fsnotify_event *event);
+
+/* attach the event to the group notification queue */
+extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
+ struct fsnotify_event_private_data *priv);
+/* true if the group notification queue is empty */
+extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+/* return, but do not dequeue the first event on the notification queue */
+extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
+/* return AND dequeue the first event on the notification queue */
+extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
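
On the consumer side, a read()-style drain looks roughly like the sketch below. The in-tree inotify reader holds group->notification_mutex around these helpers, so the sketch does the same; treat that locking rule as an assumption taken from that usage rather than from this header.

static void my_drain_queue(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	mutex_lock(&group->notification_mutex);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_notify_event(group);

		/* ... copy event->mask / event->file_name to the reader ... */

		fsnotify_put_event(event);	/* drop the queue's reference */
	}
	mutex_unlock(&group->notification_mutex);
}
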
+
+/* functions used to manipulate the marks attached to inodes */
+
+/* run all marks associated with an inode and update inode->i_fsnotify_mask */
+extern void fsnotify_recalc_inode_mask(struct inode *inode);
+extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
+/* find (and take a reference) to a mark associated with group and inode */
+extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
+/* attach the mark to both the group and the inode */
+extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
+/* given a mark, flag it to be freed when all references are dropped */
+extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
+/* run all the marks in a group, and flag them to be freed */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
+extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_unmount_inodes(struct list_head *list);
+
+/* put here because inotify does some weird stuff when destroying watches */
+extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
+ void *data, int data_is, const char *name,
+ u32 cookie);
+
+#else
+
+static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const char *name, u32 cookie)
+{}
+
+static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+{}
+
+static inline void __fsnotify_inode_delete(struct inode *inode)
+{}
+
+static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+{}
+
+static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+{}
+
+static inline u32 fsnotify_get_cookie(void)
+{
+ return 0;
+}
+
+static inline void fsnotify_unmount_inodes(struct list_head *list)
+{}
+
+#endif /* CONFIG_FSNOTIFY */
+
+#endif /* __KERNEL__ */
+
+#endif /* __LINUX_FSNOTIFY_BACKEND_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 677432b9cb7..dc3b1328aae 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
-#include <linux/linkage.h>
-#include <linux/fs.h>
-#include <linux/ktime.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
+#include <linux/linkage.h>
#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
loff_t *ppos);
#endif
+struct ftrace_func_command {
+ struct list_head list;
+ char *name;
+ int (*func)(char *func, char *cmd,
+ char *params, int enable);
+};
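
A sketch of a hypothetical user of this hook. register_ftrace_command() is declared further down under CONFIG_DYNAMIC_FTRACE; the "func:cmd:params" syntax written to the filter files to trigger it is an assumption here, not something this header spells out.

static int my_cmd_func(char *func, char *cmd, char *params, int enable)
{
	pr_info("ftrace command %s: func=%s params=%s enable=%d\n",
		cmd, func, params ? params : "", enable);
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_func,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
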
+
#ifdef CONFIG_DYNAMIC_FTRACE
-/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
-#include <asm/ftrace.h>
+
+int ftrace_arch_code_modify_prepare(void);
+int ftrace_arch_code_modify_post_process(void);
+
+struct seq_file;
+
+struct ftrace_probe_ops {
+ void (*func)(unsigned long ip,
+ unsigned long parent_ip,
+ void **data);
+ int (*callback)(unsigned long ip, void **data);
+ void (*free)(void **data);
+ int (*print)(struct seq_file *m,
+ unsigned long ip,
+ struct ftrace_probe_ops *ops,
+ void *data);
+};
+
+extern int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+extern void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+extern void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
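
A sketch of attaching a probe to a glob of functions. Only .func is filled in here; the other hooks are left NULL, and the return convention of register_ftrace_function_probe() (negative on error) is assumed rather than documented above.

static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs on every hit of a matched function, so keep it cheap */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	int ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);

	return ret < 0 ? ret : 0;
}
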
enum {
FTRACE_FL_FREE = (1 << 0),
@@ -110,15 +145,23 @@ enum {
};
struct dyn_ftrace {
- struct list_head list;
- unsigned long ip; /* address of mcount call-site */
- unsigned long flags;
- struct dyn_arch_ftrace arch;
+ union {
+ unsigned long ip; /* address of mcount call-site */
+ struct dyn_ftrace *freelist;
+ };
+ union {
+ unsigned long flags;
+ struct dyn_ftrace *newlist;
+ };
+ struct dyn_arch_ftrace arch;
};
int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);
+int register_ftrace_command(struct ftrace_func_command *cmd);
+int unregister_ftrace_command(struct ftrace_func_command *cmd);
+
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
+
+#ifndef FTRACE_ADDR
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
- * ftrace_make_nop - convert code into top
+ * ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
* @rec: the mcount call site record
* @addr: the address that the call site should be calling
@@ -181,14 +228,11 @@ extern int ftrace_make_nop(struct module *mod,
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
-
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
-extern void ftrace_release(void *start, unsigned long size);
-
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
@@ -198,6 +242,14 @@ extern void ftrace_enable_daemon(void);
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
static inline void ftrace_release(void *start, unsigned long size) { }
+static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+ return -EINVAL;
+}
+static inline int unregister_ftrace_command(char *cmd_name)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +285,25 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
}
-#ifdef CONFIG_FRAME_POINTER
-/* TODO: need to fix this for ARM */
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
-# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
-# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
-# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
-# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
-# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
-#else
-# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-# define CALLER_ADDR1 0UL
-# define CALLER_ADDR2 0UL
-# define CALLER_ADDR3 0UL
-# define CALLER_ADDR4 0UL
-# define CALLER_ADDR5 0UL
-# define CALLER_ADDR6 0UL
-#endif
+#ifndef HAVE_ARCH_CALLER_ADDR
+# ifdef CONFIG_FRAME_POINTER
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+# else
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 0UL
+# define CALLER_ADDR2 0UL
+# define CALLER_ADDR3 0UL
+# define CALLER_ADDR4 0UL
+# define CALLER_ADDR5 0UL
+# define CALLER_ADDR6 0UL
+# endif
+#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,94 +321,11 @@ static inline void __ftrace_enabled_restore(int enabled)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_TRACING
-extern int ftrace_dump_on_oops;
-
-extern void tracing_start(void);
-extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
-
-extern void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
-
-/**
- * ftrace_printk - printf formatting in the ftrace buffer
- * @fmt: the printf format for printing
- *
- * Note: __ftrace_printk is an internal function for ftrace_printk and
- * the @ip is passed in via the ftrace_printk macro.
- *
- * This function allows a kernel developer to debug fast path sections
- * that printk is not appropriate for. By scattering in various
- * printk like tracing in the code, a developer can quickly see
- * where problems are occurring.
- *
- * This is intended as a debugging tool for the developer only.
- * Please refrain from leaving ftrace_printks scattered around in
- * your code.
- */
-# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
-extern int
-__ftrace_printk(unsigned long ip, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern void ftrace_dump(void);
-#else
-static inline void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
-static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
-
-static inline void tracing_start(void) { }
-static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
-static inline int
-ftrace_printk(const char *fmt, ...)
-{
- return 0;
-}
-static inline void ftrace_dump(void) { }
-#endif
-
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
-extern void ftrace_init_module(struct module *mod,
- unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
-static inline void
-ftrace_init_module(struct module *mod,
- unsigned long *start, unsigned long *end) { }
-#endif
-
-enum {
- POWER_NONE = 0,
- POWER_CSTATE = 1,
- POWER_PSTATE = 2,
-};
-
-struct power_trace {
-#ifdef CONFIG_POWER_TRACER
- ktime_t stamp;
- ktime_t end;
- int type;
- int state;
#endif
-};
-
-#ifdef CONFIG_POWER_TRACER
-extern void trace_power_start(struct power_trace *it, unsigned int type,
- unsigned int state);
-extern void trace_power_mark(struct power_trace *it, unsigned int type,
- unsigned int state);
-extern void trace_power_end(struct power_trace *it);
-#else
-static inline void trace_power_start(struct power_trace *it, unsigned int type,
- unsigned int state) { }
-static inline void trace_power_mark(struct power_trace *it, unsigned int type,
- unsigned int state) { }
-static inline void trace_power_end(struct power_trace *it) { }
-#endif
-
/*
* Structure that defines an entry function trace.
@@ -379,6 +349,33 @@ struct ftrace_graph_ret {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* for init task */
+#define INIT_FTRACE_GRAPH .ret_stack = NULL,
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+ unsigned long ret;
+ unsigned long func;
+ unsigned long long calltime;
+ unsigned long long subtime;
+ unsigned long fp;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+ unsigned long frame_pointer);
+
/*
* Sometimes we don't want to trace a function with the function
* graph tracer but we want them to keep traced by the usual function
@@ -430,10 +427,11 @@ static inline void unpause_graph_tracing(void)
{
atomic_dec(&current->tracing_graph_pause);
}
-#else
+#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
#define __irq_entry
+#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -445,7 +443,7 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
-#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
#include <linux/sched.h>
@@ -490,6 +488,28 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
return tsk->trace & TSK_TRACE_FL_GRAPH;
}
+extern int ftrace_dump_on_oops;
+
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION .trace_recursion = 0,
+#endif
+
#endif /* CONFIG_TRACING */
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+
+void trace_hw_branch(u64 from, u64 to);
+void trace_hw_branch_oops(void);
+
+#else /* CONFIG_HW_BRANCH_TRACER */
+
+static inline void trace_hw_branch(u64 from, u64 to) {}
+static inline void trace_hw_branch_oops(void) {}
+
+#endif /* CONFIG_HW_BRANCH_TRACER */
+
#endif /* _LINUX_FTRACE_H */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
new file mode 100644
index 00000000000..5c093ffc655
--- /dev/null
+++ b/include/linux/ftrace_event.h
@@ -0,0 +1,172 @@
+#ifndef _LINUX_FTRACE_EVENT_H
+#define _LINUX_FTRACE_EVENT_H
+
+#include <linux/trace_seq.h>
+#include <linux/ring_buffer.h>
+#include <linux/percpu.h>
+
+struct trace_array;
+struct tracer;
+struct dentry;
+
+DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
+
+struct trace_print_flags {
+ unsigned long mask;
+ const char *name;
+};
+
+const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
+
+const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
+
+/*
+ * The trace entry - the most basic unit of tracing. This is what
+ * is printed in the end as a single line in the trace output, such as:
+ *
+ * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
+ */
+struct trace_entry {
+ unsigned short type;
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+ int tgid;
+};
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
+
+/*
+ * Trace iterator - used by printout routines that present trace
+ * results to users and which might sleep, etc.:
+ */
+struct trace_iterator {
+ struct trace_array *tr;
+ struct tracer *trace;
+ void *private;
+ int cpu_file;
+ struct mutex mutex;
+ struct ring_buffer_iter *buffer_iter[NR_CPUS];
+ unsigned long iter_flags;
+
+ /* The below is zeroed out in pipe_read */
+ struct trace_seq seq;
+ struct trace_entry *ent;
+ int cpu;
+ u64 ts;
+
+ loff_t pos;
+ long idx;
+
+ cpumask_var_t started;
+};
+
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
+ int flags);
+struct trace_event {
+ struct hlist_node node;
+ struct list_head list;
+ int type;
+ trace_print_func trace;
+ trace_print_func raw;
+ trace_print_func hex;
+ trace_print_func binary;
+};
+
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+
+/* Return values for print_line callback */
+enum print_line_t {
+ TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
+ TRACE_TYPE_HANDLED = 1,
+ TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
+ TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
+};
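
A sketch of registering an output handler for a custom event type. It assumes trace_seq_printf() returns zero when the sequence buffer is full and that register_ftrace_event() hands back the assigned type with 0 meaning failure; both conventions come from the tracing code rather than from this header.

#include <linux/ftrace_event.h>

static enum print_line_t my_event_trace(struct trace_iterator *iter, int flags)
{
	if (!trace_seq_printf(&iter->seq, "my_event: cpu=%d ts=%llu\n",
			      iter->cpu, (unsigned long long)iter->ts))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_trace_event = {
	.trace	= my_event_trace,
};

static int __init my_trace_event_init(void)
{
	if (!register_ftrace_event(&my_trace_event))
		return -ENODEV;		/* 0 assumed to mean "no type assigned" */
	return 0;
}
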
+
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(int type, unsigned long len,
+ unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
+
+void tracing_record_cmdline(struct task_struct *tsk);
+
+struct ftrace_event_call {
+ struct list_head list;
+ char *name;
+ char *system;
+ struct dentry *dir;
+ struct trace_event *event;
+ int enabled;
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ int id;
+ int (*raw_init)(void);
+ int (*show_format)(struct trace_seq *s);
+ int (*define_fields)(void);
+ struct list_head fields;
+ int filter_active;
+ void *filter;
+ void *mod;
+
+#ifdef CONFIG_EVENT_PROFILE
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
+#endif
+};
+
+#define MAX_FILTER_PRED 32
+#define MAX_FILTER_STR_VAL 128
+
+extern int init_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern int filter_current_check_discard(struct ftrace_event_call *call,
+ void *rec,
+ struct ring_buffer_event *event);
+
+extern int trace_define_field(struct ftrace_event_call *call, char *type,
+ char *name, int offset, int size, int is_signed);
+
+#define is_signed_type(type) (((type)(-1)) < 0)
+
+int trace_set_clr_event(const char *system, const char *event, int set);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to initialize the static variable with fmt when it is not a
+ * constant, even though the outer if statement would be optimized out.
+ */
+#define event_trace_printk(ip, fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ tracing_record_cmdline(current); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(ip, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(ip, fmt, ##args); \
+} while (0)
+
+#define __common_field(type, item, is_signed) \
+ ret = trace_define_field(event_call, #type, "common_" #item, \
+ offsetof(typeof(field.ent), item), \
+ sizeof(field.ent.item), is_signed); \
+ if (ret) \
+ return ret;
+
+#endif /* _LINUX_FTRACE_EVENT_H */
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b0..dca7bf8cffe 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
#define _LINUX_FTRACE_IRQ_H
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+#ifdef CONFIG_FTRACE_NMI_ENTER
extern void ftrace_nmi_enter(void);
extern void ftrace_nmi_exit(void);
#else
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 162e5defe68..d41ed593f79 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -121,6 +121,13 @@ struct fuse_file_lock {
#define FUSE_BIG_WRITES (1 << 5)
/**
+ * CUSE INIT request/reply flags
+ *
+ * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl
+ */
+#define CUSE_UNRESTRICTED_IOCTL (1 << 0)
+
+/**
* Release flags
*/
#define FUSE_RELEASE_FLUSH (1 << 0)
@@ -210,6 +217,9 @@ enum fuse_opcode {
FUSE_DESTROY = 38,
FUSE_IOCTL = 39,
FUSE_POLL = 40,
+
+ /* CUSE specific operations */
+ CUSE_INIT = 4096,
};
enum fuse_notify_code {
@@ -401,6 +411,27 @@ struct fuse_init_out {
__u32 max_write;
};
+#define CUSE_INIT_INFO_MAX 4096
+
+struct cuse_init_in {
+ __u32 major;
+ __u32 minor;
+ __u32 unused;
+ __u32 flags;
+};
+
+struct cuse_init_out {
+ __u32 major;
+ __u32 minor;
+ __u32 unused;
+ __u32 flags;
+ __u32 max_read;
+ __u32 max_write;
+ __u32 dev_major; /* chardev major */
+ __u32 dev_minor; /* chardev minor */
+ __u32 spare[10];
+};
+
struct fuse_interrupt_in {
__u64 unique;
};
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 3bf5bb5a34f..34956c8fdeb 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -23,6 +23,8 @@ union ktime;
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -38,6 +40,10 @@ union ktime;
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \
+ FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \
+ FUTEX_PRIVATE_FLAG)
/*
* Support for robust futexes: the kernel cleans up held futexes at
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 0cd825f7363..1bc08541c2b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -11,6 +11,7 @@
#ifdef __KERNEL__
#include <asm/io.h>
+#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -62,7 +63,7 @@ struct gameport_driver {
struct device_driver driver;
- unsigned int ignore;
+ bool ignore;
};
#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver)
diff --git a/include/linux/gcd.h b/include/linux/gcd.h
new file mode 100644
index 00000000000..69f5e8a01ba
--- /dev/null
+++ b/include/linux/gcd.h
@@ -0,0 +1,8 @@
+#ifndef _GCD_H
+#define _GCD_H
+
+#include <linux/compiler.h>
+
+unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__;
+
+#endif /* _GCD_H */
diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h
index 13f4e74609a..0ffa41df0ee 100644
--- a/include/linux/gen_stats.h
+++ b/include/linux/gen_stats.h
@@ -22,7 +22,7 @@ struct gnet_stats_basic
{
__u64 bytes;
__u32 packets;
-};
+} __attribute__ ((packed));
/**
* struct gnet_stats_rate_est - rate estimator
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 16948eaecae..45fc320a53c 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
+ sector_t alignment_offset;
struct device __dev;
struct kobject *holder_dir;
int policy, partno;
@@ -113,6 +114,7 @@ struct hd_struct {
#define GENHD_FL_UP 16
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
+#define GENHD_FL_NATIVE_CAPACITY 128
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -140,7 +142,7 @@ struct gendisk {
* disks that can't be partitioned. */
char disk_name[DISK_NAME_LEN]; /* name of major driver */
-
+ char *(*nodename)(struct gendisk *gd);
/* Array of pointers to partitions indexed by partno.
* Protected with matching bdev lock but stat and other
* non-critical accesses use RCU. Always access through
@@ -214,6 +216,7 @@ static inline void disk_put_part(struct hd_struct *part)
#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
+#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
struct disk_part_iter {
struct gendisk *disk;
@@ -333,11 +336,10 @@ static inline void part_dec_in_flight(struct hd_struct *part)
part_to_disk(part)->part0.in_flight--;
}
-/* drivers/block/ll_rw_blk.c */
+/* block/blk-core.c */
extern void part_round_stats(int cpu, struct hd_struct *part);
-/* drivers/block/genhd.c */
-extern int get_blkdev_list(char *, int);
+/* block/genhd.c */
extern void add_disk(struct gendisk *disk);
extern void del_gendisk(struct gendisk *gp);
extern void unlink_gendisk(struct gendisk *gp);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dd20cd78faa..7c777a0da17 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -4,6 +4,8 @@
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
+#include <linux/topology.h>
+#include <linux/mmdebug.h>
struct vm_area_struct;
@@ -19,7 +21,8 @@ struct vm_area_struct;
#define __GFP_DMA ((__force gfp_t)0x01u)
#define __GFP_HIGHMEM ((__force gfp_t)0x02u)
#define __GFP_DMA32 ((__force gfp_t)0x04u)
-
+#define __GFP_MOVABLE ((__force gfp_t)0x08u) /* Page is movable */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
/*
* Action modifiers - doesn't change the zoning
*
@@ -49,9 +52,20 @@ struct vm_area_struct;
#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
-#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */
-#define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK ((__force gfp_t)0)
+#endif
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
@@ -84,6 +98,9 @@ struct vm_area_struct;
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_NOMEMALLOC)
+/* Control slab gfp mask during early boot */
+#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+
/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -111,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
+#ifdef CONFIG_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+#else
+#define OPT_ZONE_HIGHMEM ZONE_NORMAL
+#endif
+
#ifdef CONFIG_ZONE_DMA
- if (flags & __GFP_DMA)
- return ZONE_DMA;
+#define OPT_ZONE_DMA ZONE_DMA
+#else
+#define OPT_ZONE_DMA ZONE_NORMAL
#endif
+
#ifdef CONFIG_ZONE_DMA32
- if (flags & __GFP_DMA32)
- return ZONE_DMA32;
+#define OPT_ZONE_DMA32 ZONE_DMA32
+#else
+#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
- if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
- (__GFP_HIGHMEM | __GFP_MOVABLE))
- return ZONE_MOVABLE;
-#ifdef CONFIG_HIGHMEM
- if (flags & __GFP_HIGHMEM)
- return ZONE_HIGHMEM;
+
+/*
+ * GFP_ZONE_TABLE is a word-size bitstring that is used for looking up the
+ * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT
+ * bits long and there are 16 of them to cover all possible combinations of
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
+ *
+ * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
+ * But __GFP_MOVABLE is not only a zone specifier but also an allocation
+ * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
+ * Only one of the lowest 3 bits (DMA, DMA32, HIGHMEM) may be set to "1".
+ *
+ * bit result
+ * =================
+ * 0x0 => NORMAL
+ * 0x1 => DMA or NORMAL
+ * 0x2 => HIGHMEM or NORMAL
+ * 0x3 => BAD (DMA+HIGHMEM)
+ * 0x4 => DMA32 or DMA or NORMAL
+ * 0x5 => BAD (DMA+DMA32)
+ * 0x6 => BAD (HIGHMEM+DMA32)
+ * 0x7 => BAD (HIGHMEM+DMA32+DMA)
+ * 0x8 => NORMAL (MOVABLE+0)
+ * 0x9 => DMA or NORMAL (MOVABLE+DMA)
+ * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
+ * 0xb => BAD (MOVABLE+HIGHMEM+DMA)
+ * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32)
+ * 0xd => BAD (MOVABLE+DMA32+DMA)
+ * 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
+ * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
+ *
+ * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ */
+
+#if 16 * ZONES_SHIFT > BITS_PER_LONG
+#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif
- return ZONE_NORMAL;
+
+#define GFP_ZONE_TABLE ( \
+ (ZONE_NORMAL << 0 * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \
+ | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \
+ | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT) \
+ | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\
+ | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\
+)
+
+/*
+ * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32
+ * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
+ * entry starting with bit 0. Bit is set if the combination is not
+ * allowed.
+ */
+#define GFP_ZONE_BAD ( \
+ 1 << (__GFP_DMA | __GFP_HIGHMEM) \
+ | 1 << (__GFP_DMA | __GFP_DMA32) \
+ | 1 << (__GFP_DMA32 | __GFP_HIGHMEM) \
+ | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM) \
+ | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA) \
+ | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA) \
+ | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM) \
+ | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\
+)
+
+static inline enum zone_type gfp_zone(gfp_t flags)
+{
+ enum zone_type z;
+ int bit = flags & GFP_ZONEMASK;
+
+ z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
+ ((1 << ZONES_SHIFT) - 1);
+
+ if (__builtin_constant_p(bit))
+ BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+ else {
+#ifdef CONFIG_DEBUG_VM
+ BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+#endif
+ }
+ return z;
}
/*
@@ -168,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { }
#endif
struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask);
static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
- return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+ return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
-static inline struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, nodemask_t *nodemask)
-{
- return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
- if (unlikely(order >= MAX_ORDER))
- return NULL;
-
/* Unknown node is current node */
if (nid < 0)
nid = numa_node_id();
@@ -199,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+ unsigned int order)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
- if (unlikely(order >= MAX_ORDER))
- return NULL;
-
return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
@@ -244,4 +336,23 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);
+extern bool oom_killer_disabled;
+
+static inline void oom_killer_disable(void)
+{
+ oom_killer_disabled = true;
+}
+
+static inline void oom_killer_enable(void)
+{
+ oom_killer_disabled = false;
+}
+
+extern gfp_t gfp_allowed_mask;
+
+static inline void set_gfp_allowed_mask(gfp_t mask)
+{
+ gfp_allowed_mask = mask;
+}
+
#endif /* __LINUX_GFP_H */
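
To see the new table lookup in action: __GFP_HIGHMEM | __GFP_MOVABLE gives the low bits 0xa, so gfp_zone() extracts the entry at offset 0xa * ZONES_SHIFT of GFP_ZONE_TABLE and lands on ZONE_MOVABLE, exactly the 0xa row of the comment in the hunk above. A stand-alone model of that arithmetic (the zone numbering and shift width are illustrative, not from a real kernel config):

        #include <stdio.h>

        /* Toy model of the gfp_zone() table lookup; not kernel code. */
        enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE };

        #define SHIFT   3       /* illustrative ZONES_SHIFT */
        #define DMA     0x1u
        #define HIGHMEM 0x2u
        #define DMA32   0x4u
        #define MOVABLE 0x8u

        static const unsigned long long table =
                ((unsigned long long)ZONE_NORMAL  << (0 * SHIFT)) |
                ((unsigned long long)ZONE_DMA     << (DMA * SHIFT)) |
                ((unsigned long long)ZONE_HIGHMEM << (HIGHMEM * SHIFT)) |
                ((unsigned long long)ZONE_DMA32   << (DMA32 * SHIFT)) |
                ((unsigned long long)ZONE_NORMAL  << (MOVABLE * SHIFT)) |
                ((unsigned long long)ZONE_DMA     << ((MOVABLE | DMA) * SHIFT)) |
                ((unsigned long long)ZONE_MOVABLE << ((MOVABLE | HIGHMEM) * SHIFT)) |
                ((unsigned long long)ZONE_DMA32   << ((MOVABLE | DMA32) * SHIFT));

        int main(void)
        {
                unsigned int bit = HIGHMEM | MOVABLE;   /* 0xa, row 0xa above */
                unsigned long long zone = (table >> (bit * SHIFT)) &
                                          ((1ULL << SHIFT) - 1);

                printf("0x%x -> zone %llu (ZONE_MOVABLE == %d)\n",
                       bit, zone, ZONE_MOVABLE);
                return 0;
        }
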
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dd..45257475623 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
- * The hardirq count can be overridden per architecture, the default is:
+ * The hardirq count can in theory reach NR_IRQS.
+ * In reality, the number of nested IRQs is also limited by the
+ * stack size. For archs with over 1000 IRQs it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
*
- * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
- * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 28 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x0fff0000
+ * HARDIRQ_MASK: 0x03ff0000
+ * NMI_MASK: 0x04000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
+#define NMI_BITS 1
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS 12
+#define MAX_HARDIRQ_BITS 10
-#ifndef MAX_HARDIRQS_PER_CPU
-#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS MAX_HARDIRQ_BITS
#endif
-/*
- * The hardirq mask has to be large enough to have space for potentially
- * all IRQ sources in the system nesting on a single CPU.
- */
-#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
-# error HARDIRQ_BITS is too low!
-#endif
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
#endif
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
#define __IRQ_MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
-#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
@@ -105,7 +116,7 @@
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq) barrier()
@@ -164,20 +175,24 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
-#define nmi_enter() \
- do { \
- ftrace_nmi_enter(); \
- lockdep_off(); \
- rcu_nmi_enter(); \
- __irq_enter(); \
+#define nmi_enter() \
+ do { \
+ ftrace_nmi_enter(); \
+ BUG_ON(in_nmi()); \
+ add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ lockdep_off(); \
+ rcu_nmi_enter(); \
+ trace_hardirq_enter(); \
} while (0)
-#define nmi_exit() \
- do { \
- __irq_exit(); \
- rcu_nmi_exit(); \
- lockdep_on(); \
- ftrace_nmi_exit(); \
+#define nmi_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ rcu_nmi_exit(); \
+ lockdep_on(); \
+ BUG_ON(!in_nmi()); \
+ sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ ftrace_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
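
With this layout, preempt_count() carries the preempt count in bits 0-7, the softirq count in bits 8-15, the hardirq count in bits 16-25 and the NMI flag in bit 26; nmi_enter() now adds NMI_OFFSET + HARDIRQ_OFFSET in one step, so both in_nmi() and in_interrupt() are true inside an NMI. A small user-space model of the mask arithmetic (it only mirrors the defines above):

        #include <stdio.h>

        /* Mirror of the hardirq.h bit layout, for illustration only. */
        #define PREEMPT_BITS    8
        #define SOFTIRQ_BITS    8
        #define HARDIRQ_BITS    10
        #define NMI_BITS        1

        #define PREEMPT_SHIFT   0
        #define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
        #define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
        #define NMI_SHIFT       (HARDIRQ_SHIFT + HARDIRQ_BITS)

        #define __IRQ_MASK(x)   ((1UL << (x)) - 1)
        #define HARDIRQ_MASK    (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
        #define NMI_MASK        (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
        #define HARDIRQ_OFFSET  (1UL << HARDIRQ_SHIFT)
        #define NMI_OFFSET      (1UL << NMI_SHIFT)

        int main(void)
        {
                unsigned long count = 0;

                count += NMI_OFFSET + HARDIRQ_OFFSET;   /* what nmi_enter() adds */
                printf("HARDIRQ_MASK=%#lx NMI_MASK=%#lx in_nmi=%d in_irq=%d\n",
                       HARDIRQ_MASK, NMI_MASK,
                       !!(count & NMI_MASK), !!(count & HARDIRQ_MASK));
                return 0;
        }
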
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index fd47a151665..6a6e701f163 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -38,6 +38,7 @@ struct hdlc_proto {
int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
+ int (*xmit)(struct sk_buff *skb, struct net_device *dev);
struct module *module;
struct hdlc_proto *next; /* next protocol in the list */
};
@@ -102,6 +103,10 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
+/* May be used by hardware driver */
+int hdlc_change_mtu(struct net_device *dev, int new_mtu);
+/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
+int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size);
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index bf6302f6b5f..c010b4a785b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -215,7 +215,7 @@ struct hdlcdrv_state {
struct hdlcdrv_hdlctx {
struct hdlcdrv_hdlcbuffer hbuf;
- long in_hdlc_tx;
+ unsigned long in_hdlc_tx;
/*
* 0 = send flags
* 1 = send txtail (flags)
@@ -241,7 +241,6 @@ struct hdlcdrv_state {
struct hdlcdrv_bitbuffer bitbuf_hdlc;
#endif /* HDLCDRV_DEBUG */
- struct net_device_stats stats;
int ptt_keyed;
/* queued skb for transmission */
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index ed21bd3dbd2..29ee2873f4a 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -1,68 +1,6 @@
#ifndef _LINUX_HDREG_H
#define _LINUX_HDREG_H
-#ifdef __KERNEL__
-#include <linux/ata.h>
-
-/*
- * This file contains some defines for the AT-hd-controller.
- * Various sources.
- */
-
-/* ide.c has its own port definitions in "ide.h" */
-
-#define HD_IRQ 14
-
-/* Hd controller regs. Ref: IBM AT Bios-listing */
-#define HD_DATA 0x1f0 /* _CTL when writing */
-#define HD_ERROR 0x1f1 /* see err-bits */
-#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
-#define HD_SECTOR 0x1f3 /* starting sector */
-#define HD_LCYL 0x1f4 /* starting cylinder */
-#define HD_HCYL 0x1f5 /* high byte of starting cyl */
-#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
-#define HD_STATUS 0x1f7 /* see status-bits */
-#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
-#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
-#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
-
-#define HD_CMD 0x3f6 /* used for resets */
-#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
-
-/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
-
-/* Bits of HD_STATUS */
-#define ERR_STAT 0x01
-#define INDEX_STAT 0x02
-#define ECC_STAT 0x04 /* Corrected error */
-#define DRQ_STAT 0x08
-#define SEEK_STAT 0x10
-#define SRV_STAT 0x10
-#define WRERR_STAT 0x20
-#define READY_STAT 0x40
-#define BUSY_STAT 0x80
-
-/* Bits for HD_ERROR */
-#define MARK_ERR 0x01 /* Bad address mark */
-#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */
-#define TRK0_ERR 0x02 /* couldn't find track 0 */
-#define EOM_ERR 0x02 /* End Of Media (ATAPI) */
-#define ABRT_ERR 0x04 /* Command aborted */
-#define MCR_ERR 0x08 /* media change request */
-#define ID_ERR 0x10 /* ID field not found */
-#define MC_ERR 0x20 /* media changed */
-#define ECC_ERR 0x40 /* Uncorrectable ECC error */
-#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
-#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
-#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */
-
-/* Bits of HD_NSECTOR */
-#define CD 0x01
-#define IO 0x02
-#define REL 0x04
-#define TAG_MASK 0xf8
-#endif /* __KERNEL__ */
-
#include <linux/types.h>
/*
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr {
#define TASKFILE_INVALID 0x7fff
#endif
+#ifndef __KERNEL__
/* ATA/ATAPI Commands pre T13 Spec */
#define WIN_NOP 0x00
/*
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr {
#define SECURITY_ERASE_UNIT 0xBD
#define SECURITY_FREEZE_LOCK 0xBE
#define SECURITY_DISABLE_PASSWORD 0xBF
+#endif /* __KERNEL__ */
struct hd_geometry {
unsigned char heads;
@@ -448,6 +388,7 @@ enum {
#define __NEW_HD_DRIVE_ID
+#ifndef __KERNEL__
/*
* Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec.
*
@@ -699,6 +640,7 @@ struct hd_driveid {
* 7:0 Signature
*/
};
+#endif /* __KERNEL__ */
/*
* IDE "nice" flags. These are used on a per drive basis to determine
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fa8ee9cef7b..53489fd4d70 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -238,6 +238,42 @@ struct hid_item {
#define HID_GD_RIGHT 0x00010092
#define HID_GD_LEFT 0x00010093
+#define HID_DG_DIGITIZER 0x000d0001
+#define HID_DG_PEN 0x000d0002
+#define HID_DG_LIGHTPEN 0x000d0003
+#define HID_DG_TOUCHSCREEN 0x000d0004
+#define HID_DG_TOUCHPAD 0x000d0005
+#define HID_DG_STYLUS 0x000d0020
+#define HID_DG_PUCK 0x000d0021
+#define HID_DG_FINGER 0x000d0022
+#define HID_DG_TIPPRESSURE 0x000d0030
+#define HID_DG_BARRELPRESSURE 0x000d0031
+#define HID_DG_INRANGE 0x000d0032
+#define HID_DG_TOUCH 0x000d0033
+#define HID_DG_UNTOUCH 0x000d0034
+#define HID_DG_TAP 0x000d0035
+#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
+#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
+#define HID_DG_INVERT 0x000d003c
+#define HID_DG_TIPSWITCH 0x000d0042
+#define HID_DG_TIPSWITCH2 0x000d0043
+#define HID_DG_BARRELSWITCH 0x000d0044
+#define HID_DG_ERASER 0x000d0045
+#define HID_DG_TABLETPICK 0x000d0046
+/*
+ * As of May 20, 2009 the usages below are not yet in the official USB spec,
+ * but are being pushed by Microsoft as described in their paper "Digitizer
+ * Drivers for Windows Touch and Pen-Based Computers".
+ */
+#define HID_DG_CONFIDENCE 0x000d0047
+#define HID_DG_WIDTH 0x000d0048
+#define HID_DG_HEIGHT 0x000d0049
+#define HID_DG_CONTACTID 0x000d0051
+#define HID_DG_INPUTMODE 0x000d0052
+#define HID_DG_DEVICEINDEX 0x000d0053
+#define HID_DG_CONTACTCOUNT 0x000d0054
+#define HID_DG_CONTACTMAX 0x000d0055
+
/*
* HID report types --- Ouch! HID spec says 1 2 3!
*/
@@ -270,6 +306,7 @@ struct hid_item {
#define HID_QUIRK_INVERT 0x00000001
#define HID_QUIRK_NOTOUCH 0x00000002
+#define HID_QUIRK_IGNORE 0x00000004
#define HID_QUIRK_NOGET 0x00000008
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
@@ -603,12 +640,17 @@ struct hid_ll_driver {
int (*open)(struct hid_device *hdev);
void (*close)(struct hid_device *hdev);
+ int (*power)(struct hid_device *hdev, int level);
+
int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
unsigned int code, int value);
int (*parse)(struct hid_device *hdev);
};
+#define PM_HINT_FULLON 1<<5
+#define PM_HINT_NORMAL 1<<1
+
/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
/* We ignore a few input applications that are not widely used */
#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +683,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
void hid_output_report(struct hid_report *report, __u8 *data);
struct hid_device *hid_allocate_device(void);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
/**
@@ -791,21 +834,5 @@ dbg_hid(const char *fmt, ...)
__FILE__ , ## arg)
#endif /* HID_FF */
-#ifdef __KERNEL__
-#ifdef CONFIG_HID_COMPAT
-#define HID_COMPAT_LOAD_DRIVER(name) \
-/* prototype to avoid sparse warning */ \
-extern void hid_compat_##name(void); \
-void hid_compat_##name(void) { } \
-EXPORT_SYMBOL(hid_compat_##name)
-#else
-#define HID_COMPAT_LOAD_DRIVER(name)
-#endif /* HID_COMPAT */
-#define HID_COMPAT_CALL_DRIVER(name) do { \
- extern void hid_compat_##name(void); \
- hid_compat_##name(); \
-} while (0)
-#endif /* __KERNEL__ */
-
#endif
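
A transport driver wires the new power hook up through its hid_ll_driver; a hedged sketch of what such a .power implementation might look like (the body is illustrative only):

        #include <linux/errno.h>
        #include <linux/hid.h>

        /* Illustrative .power hook for a HID transport driver. */
        static int example_ll_power(struct hid_device *hdev, int level)
        {
                switch (level) {
                case PM_HINT_FULLON:
                        /* e.g. keep the underlying transport fully powered */
                        return 0;
                case PM_HINT_NORMAL:
                        /* allow it to drop back to its normal power state */
                        return 0;
                default:
                        return -EINVAL;
                }
        }

        static struct hid_ll_driver example_ll_driver = {
                /* .open/.close/.parse/.hidinput_input_event omitted for brevity */
                .power = example_ll_power,
        };
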
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 13875ce9112..211ff449726 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page)
}
#endif
-#ifdef CONFIG_HIGHMEM
+#include <asm/kmap_types.h>
+
+#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
+
+void debug_kmap_atomic(enum km_type type);
+
+#else
+
+static inline void debug_kmap_atomic(enum km_type type)
+{
+}
+#endif
+
+#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
/* declarations for linux/mm/highmem.c */
@@ -42,9 +55,9 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-#define kunmap(page) do { (void) (page); } while (0)
-
-#include <asm/kmap_types.h>
+static inline void kunmap(struct page *page)
+{
+}
static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index bd37078c2d7..7400900de94 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -30,8 +30,11 @@ struct hrtimer_cpu_base;
* Mode arguments of xxx_hrtimer functions:
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS, /* Time value is absolute */
- HRTIMER_MODE_REL, /* Time value is relative to now */
+ HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
+ HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
+ HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
+ HRTIMER_MODE_ABS_PINNED = 0x02,
+ HRTIMER_MODE_REL_PINNED = 0x03,
};
/*
@@ -336,6 +339,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode);
extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
+extern int
+__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns,
+ const enum hrtimer_mode mode, int wakeup);
+
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
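
A hedged sketch of how a driver might use the new pinned modes; the timer, callback and one-second period are placeholders, not an in-tree user:

        #include <linux/hrtimer.h>

        static struct hrtimer example_timer;            /* placeholder timer */

        static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
        {
                /* ... per-CPU work ... */
                return HRTIMER_NORESTART;
        }

        static void example_timer_arm(void)
        {
                hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                example_timer.function = example_timer_fn;
                /* the _PINNED variant keeps the timer on the current CPU */
                hrtimer_start(&example_timer, ktime_set(1, 0),
                              HRTIMER_MODE_REL_PINNED);
        }
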
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 03be7f29ca0..a05a5ef3339 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -11,6 +11,8 @@
struct ctl_table;
+int PageHuge(struct page *page);
+
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_HUGETLB;
@@ -61,6 +63,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
#else /* !CONFIG_HUGETLB_PAGE */
+static inline int PageHuge(struct page *page)
+{
+ return 0;
+}
+
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return 0;
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index adcb3dc7ac2..1364d62e2fb 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -1,7 +1,14 @@
#ifndef _LINUX_I2C_ALGO_PCA_H
#define _LINUX_I2C_ALGO_PCA_H
-/* Clock speeds for the bus */
+/* Chips known to the pca algo */
+#define I2C_PCA_CHIP_9564 0x00
+#define I2C_PCA_CHIP_9665 0x01
+
+/* Internal period of the PCA9665 oscillator */
+#define I2C_PCA_OSC_PER 3 /* e10-8s, i.e. in units of 10 ns */
+
+/* Clock speeds for the bus for the PCA9564 */
#define I2C_PCA_CON_330kHz 0x00
#define I2C_PCA_CON_288kHz 0x01
#define I2C_PCA_CON_217kHz 0x02
@@ -18,6 +25,26 @@
#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */
#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */
+/* PCA9665 registers */
+#define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */
+#define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */
+
+/* PCA9665 indirect registers */
+#define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */
+#define I2C_PCA_IADR 0x01 /* OWN ADR */
+#define I2C_PCA_ISCLL 0x02 /* SCL LOW period */
+#define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */
+#define I2C_PCA_ITO 0x04 /* TIMEOUT */
+#define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */
+#define I2C_PCA_IMODE 0x06 /* I2C Bus mode */
+
+/* PCA9665 I2C bus mode */
+#define I2C_PCA_MODE_STD 0x00 /* Standard mode */
+#define I2C_PCA_MODE_FAST 0x01 /* Fast mode */
+#define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */
+#define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */
+
+
#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */
#define I2C_PCA_CON_ENSIO 0x40 /* Enable */
#define I2C_PCA_CON_STA 0x20 /* Start */
@@ -31,7 +58,9 @@ struct i2c_algo_pca_data {
int (*read_byte) (void *data, int reg);
int (*wait_for_completion) (void *data);
void (*reset_chip) (void *data);
- /* i2c_clock values are defined in linux/i2c-algo-pca.h */
+ /* For PCA9564, use one of the predefined frequencies:
+ * 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000
+ * For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
};
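
Under the new convention, platform glue for a PCA9665 passes the raw bus frequency, while a PCA9564 still picks one of the fixed rates. A rough sketch with example values (the mandatory register-access callbacks are omitted):

        #include <linux/i2c-algo-pca.h>

        /* Example only: a PCA9665 on a 100 kHz bus.  The data/read_byte/
         * write_byte/wait_for_completion/reset_chip callbacks are provided
         * by the bus driver and omitted here. */
        static struct i2c_algo_pca_data pca9665_algo_data = {
                .i2c_clock = 100000,            /* Hz; any sensible rate for PCA9665 */
        };

        /* A PCA9564 instead selects one of the predefined rates: */
        static struct i2c_algo_pca_data pca9564_algo_data = {
                .i2c_clock = 59000,             /* one of the fixed PCA9564 values */
        };
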
diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h
deleted file mode 100644
index 3b7715024e6..00000000000
--- a/include/linux/i2c-algo-sgi.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License version 2 as published by the Free Software Foundation.
- *
- * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
- */
-
-#ifndef I2C_ALGO_SGI_H
-#define I2C_ALGO_SGI_H 1
-
-#include <linux/i2c.h>
-
-struct i2c_algo_sgi_data {
- void *data; /* private data for lowlevel routines */
- unsigned (*getctrl)(void *data);
- void (*setctrl)(void *data, unsigned val);
- unsigned (*rdata)(void *data);
- void (*wdata)(void *data, unsigned val);
-
- int xfer_timeout;
- int ack_timeout;
-};
-
-int i2c_sgi_add_bus(struct i2c_adapter *);
-
-#endif /* I2C_ALGO_SGI_H */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 1ffc23bc5d1..c9087de5c6c 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -33,46 +33,10 @@
#define I2C_DRIVERID_MSP3400 1
#define I2C_DRIVERID_TUNER 2
-#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
-#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
-#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
-#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
-#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
-#define I2C_DRIVERID_SAA7110 22 /* video decoder */
-#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
-#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */
-#define I2C_DRIVERID_BT819 40 /* video decoder */
-#define I2C_DRIVERID_BT856 41 /* video encoder */
-#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */
-#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */
-#define I2C_DRIVERID_SAA7114 49 /* video decoder */
-#define I2C_DRIVERID_ADV7170 54 /* video encoder */
-#define I2C_DRIVERID_SAA7191 57 /* video decoder */
-#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
-#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */
-#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */
-#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */
-#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
-#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
-#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
-#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */
#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
-#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */
-#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */
-#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
-#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
-#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
-#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
-#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
-#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
-#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */
-#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
-#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
-
-#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
/*
* ---- Adapter types ----------------------------------------------------
@@ -87,6 +51,8 @@
#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */
#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */
+#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */
+#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */
/* --- SGI adapters */
#define I2C_HW_SGI_VINO 0x160000
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h
index 8ed591b0887..4d5e57ff661 100644
--- a/include/linux/i2c-ocores.h
+++ b/include/linux/i2c-ocores.h
@@ -14,6 +14,8 @@
struct ocores_i2c_platform_data {
u32 regstep; /* distance between registers */
u32 clock_khz; /* input clock in kHz */
+ u8 num_devices; /* number of devices in the devices list */
+ struct i2c_board_info const *devices; /* devices connected to the bus */
};
#endif /* _LINUX_I2C_OCORES_H */
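
With the two new fields a board file can list slave devices to be instantiated once the OpenCores master registers. A hedged example; the device names, addresses and clock values are made up:

        #include <linux/kernel.h>
        #include <linux/i2c.h>
        #include <linux/i2c-ocores.h>

        /* Made-up slaves behind the OpenCores master. */
        static struct i2c_board_info ocores_i2c_devices[] = {
                { I2C_BOARD_INFO("rtc8564", 0x51), },
                { I2C_BOARD_INFO("24c08",   0x50), },
        };

        static struct ocores_i2c_platform_data ocores_i2c_data = {
                .regstep     = 4,               /* register spacing, board specific */
                .clock_khz   = 50000,           /* 50 MHz input clock */
                .num_devices = ARRAY_SIZE(ocores_i2c_devices),
                .devices     = ocores_i2c_devices,
        };
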
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h
index 3d191873f2d..aba33759dec 100644
--- a/include/linux/i2c-pca-platform.h
+++ b/include/linux/i2c-pca-platform.h
@@ -6,7 +6,7 @@ struct i2c_pca9564_pf_platform_data {
* not supplied (negative value), but it
* cannot exit some error conditions then */
int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
- int timeout; /* timeout = this value * 10us */
+ int timeout; /* timeout in jiffies */
};
#endif /* I2C_PCA9564_PLATFORM_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c86c3b07604..f4784c0fe97 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -47,6 +47,7 @@ struct i2c_driver;
union i2c_smbus_data;
struct i2c_board_info;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
* The master routines are the ones normally used to transmit data to devices
* on a bus (or read from them). Apart from two basic transfer functions to
@@ -93,6 +94,7 @@ extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
+#endif /* I2C */
/**
* struct i2c_driver - represent an I2C device driver
@@ -100,9 +102,8 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @class: What kind of i2c device we instantiate (for detect)
* @attach_adapter: Callback for bus addition (for legacy drivers)
* @detach_adapter: Callback for bus removal (for legacy drivers)
- * @detach_client: Callback for device removal (for legacy drivers)
- * @probe: Callback for device binding (new-style drivers)
- * @remove: Callback for device unbinding (new-style drivers)
+ * @probe: Callback for device binding
+ * @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @suspend: Callback for device suspend
* @resume: Callback for device resume
@@ -137,26 +138,14 @@ struct i2c_driver {
int id;
unsigned int class;
- /* Notifies the driver that a new bus has appeared. This routine
- * can be used by the driver to test if the bus meets its conditions
- * & seek for the presence of the chip(s) it supports. If found, it
- * registers the client(s) that are on the bus to the i2c admin. via
- * i2c_attach_client. (LEGACY I2C DRIVERS ONLY)
+ /* Notifies the driver that a new bus has appeared or is about to be
+ * removed. You should avoid using this if you can; it will probably
+ * be removed in the near future.
*/
int (*attach_adapter)(struct i2c_adapter *);
int (*detach_adapter)(struct i2c_adapter *);
- /* tells the driver that a client is about to be deleted & gives it
- * the chance to remove its private data. Also, if the client struct
- * has been dynamically allocated by the driver in the function above,
- * it must be freed here. (LEGACY I2C DRIVERS ONLY)
- */
- int (*detach_client)(struct i2c_client *) __deprecated;
-
- /* Standard driver model interfaces, for "new style" i2c drivers.
- * With the driver model, device enumeration is NEVER done by drivers;
- * it's done by infrastructure. (NEW STYLE DRIVERS ONLY)
- */
+ /* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
int (*remove)(struct i2c_client *);
@@ -191,9 +180,8 @@ struct i2c_driver {
* @driver: device's driver, hence pointer to access routines
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
- * @list: list of active/busy clients (DEPRECATED)
- * @detected: member of an i2c_driver.clients list
- * @released: used to synchronize client releases & detaches and references
+ * @detected: member of an i2c_driver.clients list or i2c-core's
+ * userspace_devices list
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -209,9 +197,7 @@ struct i2c_client {
struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */
int irq; /* irq issued by device */
- struct list_head list; /* DEPRECATED */
struct list_head detected;
- struct completion released;
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -248,11 +234,10 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
* that, such as chip type, configuration, associated IRQ, and so on.
*
* i2c_board_info is used to build tables of information listing I2C devices
- * that are present. This information is used to grow the driver model tree
- * for "new style" I2C drivers. For mainboards this is done statically using
- * i2c_register_board_info(); bus numbers identify adapters that aren't
- * yet available. For add-on boards, i2c_new_device() does this dynamically
- * with the adapter already known.
+ * that are present. This information is used to grow the driver model tree.
+ * For mainboards this is done statically using i2c_register_board_info();
+ * bus numbers identify adapters that aren't yet available. For add-on boards,
+ * i2c_new_device() does this dynamically with the adapter already known.
*/
struct i2c_board_info {
char type[I2C_NAME_SIZE];
@@ -274,9 +259,10 @@ struct i2c_board_info {
* are provided using conventional syntax.
*/
#define I2C_BOARD_INFO(dev_type, dev_addr) \
- .type = (dev_type), .addr = (dev_addr)
+ .type = dev_type, .addr = (dev_addr)
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/* Add-on boards should register/unregister their devices; e.g. a board
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
@@ -300,6 +286,7 @@ extern struct i2c_client *
i2c_new_dummy(struct i2c_adapter *adap, u16 address);
extern void i2c_unregister_device(struct i2c_client *);
+#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
* This is done at arch_initcall time, before declaring any i2c adapters.
@@ -316,7 +303,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
{
return 0;
}
-#endif
+#endif /* I2C_BOARDINFO */
/*
* The following structs are for those who like to implement new bus drivers:
@@ -352,21 +339,15 @@ struct i2c_adapter {
const struct i2c_algorithm *algo; /* the algorithm to access the bus */
void *algo_data;
- /* --- administration stuff. */
- int (*client_register)(struct i2c_client *);
- int (*client_unregister)(struct i2c_client *);
-
/* data fields that are valid for all devices */
u8 level; /* nesting level for lockdep */
struct mutex bus_lock;
- struct mutex clist_lock;
int timeout; /* in jiffies */
int retries;
struct device dev; /* the adapter device */
int nr;
- struct list_head clients; /* DEPRECATED */
char name[48];
struct completion dev_released;
};
@@ -412,11 +393,16 @@ struct i2c_client_address_data {
/* The numbers to use to set I2C bus address */
#define ANY_I2C_BUS 0xffff
+/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
+#define I2C_ADDRS(addr, addrs...) \
+ ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
+
/* ----- functions exported by i2c.o */
/* administration...
*/
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
extern int i2c_add_adapter(struct i2c_adapter *);
extern int i2c_del_adapter(struct i2c_adapter *);
extern int i2c_add_numbered_adapter(struct i2c_adapter *);
@@ -429,11 +415,6 @@ static inline int i2c_add_driver(struct i2c_driver *driver)
return i2c_register_driver(THIS_MODULE, driver);
}
-/* These are deprecated, your driver should use the standard .probe()
- * and .remove() methods instead. */
-extern int __deprecated i2c_attach_client(struct i2c_client *);
-extern int __deprecated i2c_detach_client(struct i2c_client *);
-
extern struct i2c_client *i2c_use_client(struct i2c_client *client);
extern void i2c_release_client(struct i2c_client *client);
@@ -442,14 +423,6 @@ extern void i2c_release_client(struct i2c_client *client);
extern void i2c_clients_command(struct i2c_adapter *adap,
unsigned int cmd, void *arg);
-/* Detect function. It iterates over all possible addresses itself.
- * It will only call found_proc if some client is connected at the
- * specific address (unless a 'force' matched);
- */
-extern int i2c_probe(struct i2c_adapter *adapter,
- const struct i2c_client_address_data *address_data,
- int (*found_proc) (struct i2c_adapter *, int, int));
-
extern struct i2c_adapter *i2c_get_adapter(int id);
extern void i2c_put_adapter(struct i2c_adapter *adap);
@@ -471,6 +444,7 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
{
return adap->nr;
}
+#endif /* I2C */
#endif /* __KERNEL__ */
/**
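
The new I2C_ADDRS() helper builds an I2C_CLIENT_END-terminated address list in place, so a detect-capable driver can write something like the following (the addresses are illustrative):

        #include <linux/i2c.h>

        /* Hypothetical probe list for a sensor answering at 0x48-0x4a. */
        static const unsigned short *sensor_addrs = I2C_ADDRS(0x48, 0x49, 0x4a);

Since the expansion is a compound literal, the same macro can also be used directly inside a driver structure initializer wherever a const unsigned short pointer is expected.
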
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h
index f6edd522a92..8ace93024d6 100644
--- a/include/linux/i2c/at24.h
+++ b/include/linux/i2c/at24.h
@@ -2,6 +2,7 @@
#define _LINUX_AT24_H
#include <linux/types.h>
+#include <linux/memory.h>
/*
* As seen through Linux I2C, differences between the most common types of I2C
@@ -23,6 +24,9 @@ struct at24_platform_data {
#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
+
+ void (*setup)(struct memory_accessor *, void *context);
+ void *context;
};
#endif /* _LINUX_AT24_H */
diff --git a/include/linux/i2c/lm8323.h b/include/linux/i2c/lm8323.h
new file mode 100644
index 00000000000..478d668bc59
--- /dev/null
+++ b/include/linux/i2c/lm8323.h
@@ -0,0 +1,46 @@
+/*
+ * lm8323.h - Configuration for LM8323 keypad driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License only).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_LM8323_H
+#define __LINUX_LM8323_H
+
+#include <linux/types.h>
+
+/*
+ * Largest keycode that the chip can send, plus one,
+ * so keys can be mapped directly at the index of the
+ * LM8323 keycode instead of subtracting one.
+ */
+#define LM8323_KEYMAP_SIZE (0x7f + 1)
+
+#define LM8323_NUM_PWMS 3
+
+struct lm8323_platform_data {
+ int debounce_time; /* Time to watch for key bouncing, in ms. */
+ int active_time; /* Idle time until sleep, in ms. */
+
+ int size_x;
+ int size_y;
+ bool repeat;
+ const unsigned short *keymap;
+
+ const char *pwm_names[LM8323_NUM_PWMS];
+
+ const char *name; /* Device name. */
+};
+
+#endif /* __LINUX_LM8323_H */
diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h
index 3c7361217df..81736d6a8db 100644
--- a/include/linux/i2c/pca953x.h
+++ b/include/linux/i2c/pca953x.h
@@ -15,4 +15,5 @@ struct pca953x_platform_data {
int (*teardown)(struct i2c_client *client,
unsigned gpio, unsigned ngpio,
void *context);
+ char **names;
};
diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h
new file mode 100644
index 00000000000..d9b34bfdae7
--- /dev/null
+++ b/include/linux/i2c/s6000.h
@@ -0,0 +1,10 @@
+#ifndef __LINUX_I2C_S6000_H
+#define __LINUX_I2C_S6000_H
+
+struct s6_i2c_platform_data {
+ const char *clock; /* the clock to use */
+ int bus_num; /* the bus number to register */
+};
+
+#endif
+
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
index 8137f660a5c..0dc80ef2497 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl4030.h
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
+/* Power bus message definitions */
+
+#define DEV_GRP_NULL 0x0
+#define DEV_GRP_P1 0x1
+#define DEV_GRP_P2 0x2
+#define DEV_GRP_P3 0x4
+
+#define RES_GRP_RES 0x0
+#define RES_GRP_PP 0x1
+#define RES_GRP_RC 0x2
+#define RES_GRP_PP_RC 0x3
+#define RES_GRP_PR 0x4
+#define RES_GRP_PP_PR 0x5
+#define RES_GRP_RC_PR 0x6
+#define RES_GRP_ALL 0x7
+
+#define RES_TYPE2_R0 0x0
+
+#define RES_TYPE_ALL 0x7
+
+#define RES_STATE_WRST 0xF
+#define RES_STATE_ACTIVE 0xE
+#define RES_STATE_SLEEP 0x8
+#define RES_STATE_OFF 0x0
+
+/*
+ * Power Bus Message Format ... these can be sent individually by Linux,
+ * but are usually part of downloaded scripts that are run when various
+ * power events are triggered.
+ *
+ * Broadcast Message (16 Bits):
+ * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
+ * RES_STATE[3:0]
+ *
+ * Singular Message (16 Bits):
+ * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
+ */
+
+#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
+ ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
+ | (type) << 4 | (state))
+
+#define MSG_SINGULAR(devgrp, id, state) \
+ ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+
+/*----------------------------------------------------------------------*/
+
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
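
Reading the message format above, a broadcast that puts every resource type of a group to SLEEP and a singular message for one resource would be composed roughly like this (the singular resource id 7 is a made-up placeholder):

        #include <linux/types.h>
        #include <linux/i2c/twl4030.h>

        /* All resources of every type in group P1 -> SLEEP (broadcast). */
        static const u16 msg_sleep_all = MSG_BROADCAST(DEV_GRP_P1, RES_GRP_ALL,
                                                       RES_TYPE_ALL, RES_TYPE2_R0,
                                                       RES_STATE_SLEEP);

        /* One resource (id 7, placeholder) in group P1 -> ACTIVE (singular). */
        static const u16 msg_wake_res7 = MSG_SINGULAR(DEV_GRP_P1, 7,
                                                      RES_STATE_ACTIVE);
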
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
index 05a80c44513..1587b7dec50 100644
--- a/include/linux/i7300_idle.h
+++ b/include/linux/i7300_idle.h
@@ -16,35 +16,33 @@
struct fbd_ioat {
unsigned int vendor;
unsigned int ioat_dev;
+ unsigned int enabled;
};
/*
* The i5000 chip-set has the same hooks as the i7300
- * but support is disabled by default because this driver
- * has not been validated on that platform.
+ * but it is not enabled by default and must be manually
+ * enabled with "forceload=1" because it is only
+ * lightly validated.
*/
-#define SUPPORT_I5000 0
static const struct fbd_ioat fbd_ioat_list[] = {
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
-#if SUPPORT_I5000
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
-#endif
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
{0, 0}
};
/* table of devices that work with this driver */
static const struct pci_device_id pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
-#if SUPPORT_I5000
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
-#endif
{ } /* Terminating entry */
};
/* Check for known platforms with I/O-AT */
static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
- struct pci_dev **ioat_dev)
+ struct pci_dev **ioat_dev,
+ int enable_all)
{
int i;
struct pci_dev *memdev, *dmadev;
@@ -69,6 +67,8 @@ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
if (dmadev->vendor == fbd_ioat_list[i].vendor &&
dmadev->device == fbd_ioat_list[i].ioat_dev) {
+ if (!(fbd_ioat_list[i].enabled || enable_all))
+ continue;
if (fbd_dev)
*fbd_dev = memdev;
if (ioat_dev)
diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h
index 1c7a0dd5536..06695b74d40 100644
--- a/include/linux/ibmtr.h
+++ b/include/linux/ibmtr.h
@@ -207,7 +207,7 @@ struct tok_info {
unsigned short exsap_station_id;
unsigned short global_int_enable;
struct sk_buff *current_skb;
- struct net_device_stats tr_stats;
+
unsigned char auto_speedsave;
open_state open_status, sap_status;
enum {MANUAL, AUTOMATIC} open_mode;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 25087aead65..95c6e00a72e 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -26,7 +26,10 @@
#include <asm/io.h>
#include <asm/mutex.h>
-#if defined(CONFIG_CRIS) || defined(CONFIG_FRV)
+/* for request_sense */
+#include <linux/cdrom.h>
+
+#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
# define SUPPORT_VLB_SYNC 0
#else
# define SUPPORT_VLB_SYNC 1
@@ -40,6 +43,13 @@
#define ERROR_RESET 3 /* Reset controller every 4th retry */
#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
+/* Error codes returned in rq->errors to the higher part of the driver. */
+enum {
+ IDE_DRV_ERROR_GENERAL = 101,
+ IDE_DRV_ERROR_FILEMARK = 102,
+ IDE_DRV_ERROR_EOD = 103,
+};
+
/*
* Definitions for accessing IDE controller registers
*/
@@ -147,12 +157,6 @@ enum {
#define REQ_UNPARK_HEADS 0x23
/*
- * Check for an interrupt and acknowledge the interrupt status
- */
-struct hwif_s;
-typedef int (ide_ack_intr_t)(struct hwif_s *);
-
-/*
* hwif_chipset_t is used to keep track of the specific hardware
* chipset used by each IDE interface, if known.
*/
@@ -168,20 +172,18 @@ typedef u8 hwif_chipset_t;
/*
* Structure to hold all information about the location of this port
*/
-typedef struct hw_regs_s {
+struct ide_hw {
union {
struct ide_io_ports io_ports;
unsigned long io_ports_array[IDE_NR_PORTS];
};
int irq; /* our irq number */
- ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
- hwif_chipset_t chipset;
struct device *dev, *parent;
unsigned long config;
-} hw_regs_t;
+};
-static inline void ide_std_init_ports(hw_regs_t *hw,
+static inline void ide_std_init_ports(struct ide_hw *hw,
unsigned long io_addr,
unsigned long ctl_addr)
{
@@ -193,42 +195,8 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
hw->io_ports.ctl_addr = ctl_addr;
}
-/* for IDE PCI controllers in legacy mode, temporary */
-static inline int __ide_default_irq(unsigned long base)
-{
- switch (base) {
-#ifdef CONFIG_IA64
- case 0x1f0: return isa_irq_to_vector(14);
- case 0x170: return isa_irq_to_vector(15);
-#else
- case 0x1f0: return 14;
- case 0x170: return 15;
-#endif
- }
- return 0;
-}
-
-#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \
- defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \
- || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64)
-#include <asm/ide.h>
-#else
-#include <asm-generic/ide_iops.h>
-#endif
-
#define MAX_HWIFS 10
-/* Currently only m68k, apus and m8xx need it */
-#ifndef IDE_ARCH_ACK_INTR
-# define ide_ack_intr(hwif) (1)
-#endif
-
-/* Currently only Atari needs it */
-#ifndef IDE_ARCH_LOCK
-# define ide_release_lock() do {} while (0)
-# define ide_get_lock(hdlr, data) do {} while (0)
-#endif /* IDE_ARCH_LOCK */
-
/*
* Now for the data we need to maintain per-drive: ide_drive_t
*/
@@ -242,21 +210,12 @@ static inline int __ide_default_irq(unsigned long base)
/*
* Special Driver Flags
- *
- * set_geometry : respecify drive geometry
- * recalibrate : seek to cyl 0
- * set_multmode : set multmode count
- * reserved : unused
*/
-typedef union {
- unsigned all : 8;
- struct {
- unsigned set_geometry : 1;
- unsigned recalibrate : 1;
- unsigned set_multmode : 1;
- unsigned reserved : 5;
- } b;
-} special_t;
+enum {
+ IDE_SFLAG_SET_GEOMETRY = (1 << 0),
+ IDE_SFLAG_RECALIBRATE = (1 << 1),
+ IDE_SFLAG_SET_MULTMODE = (1 << 2),
+};
/*
* Status returned from various ide_ functions
@@ -267,108 +226,91 @@ typedef enum {
} ide_startstop_t;
enum {
+ IDE_VALID_ERROR = (1 << 1),
+ IDE_VALID_FEATURE = IDE_VALID_ERROR,
+ IDE_VALID_NSECT = (1 << 2),
+ IDE_VALID_LBAL = (1 << 3),
+ IDE_VALID_LBAM = (1 << 4),
+ IDE_VALID_LBAH = (1 << 5),
+ IDE_VALID_DEVICE = (1 << 6),
+ IDE_VALID_LBA = IDE_VALID_LBAL |
+ IDE_VALID_LBAM |
+ IDE_VALID_LBAH,
+ IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
+ IDE_VALID_NSECT |
+ IDE_VALID_LBA,
+ IDE_VALID_IN_TF = IDE_VALID_NSECT |
+ IDE_VALID_LBA,
+ IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
+ IDE_VALID_IN_HOB = IDE_VALID_ERROR |
+ IDE_VALID_NSECT |
+ IDE_VALID_LBA,
+};
+
+enum {
IDE_TFLAG_LBA48 = (1 << 0),
- IDE_TFLAG_FLAGGED = (1 << 2),
- IDE_TFLAG_OUT_DATA = (1 << 3),
- IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
- IDE_TFLAG_OUT_HOB_NSECT = (1 << 5),
- IDE_TFLAG_OUT_HOB_LBAL = (1 << 6),
- IDE_TFLAG_OUT_HOB_LBAM = (1 << 7),
- IDE_TFLAG_OUT_HOB_LBAH = (1 << 8),
- IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE |
- IDE_TFLAG_OUT_HOB_NSECT |
- IDE_TFLAG_OUT_HOB_LBAL |
- IDE_TFLAG_OUT_HOB_LBAM |
- IDE_TFLAG_OUT_HOB_LBAH,
- IDE_TFLAG_OUT_FEATURE = (1 << 9),
- IDE_TFLAG_OUT_NSECT = (1 << 10),
- IDE_TFLAG_OUT_LBAL = (1 << 11),
- IDE_TFLAG_OUT_LBAM = (1 << 12),
- IDE_TFLAG_OUT_LBAH = (1 << 13),
- IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE |
- IDE_TFLAG_OUT_NSECT |
- IDE_TFLAG_OUT_LBAL |
- IDE_TFLAG_OUT_LBAM |
- IDE_TFLAG_OUT_LBAH,
- IDE_TFLAG_OUT_DEVICE = (1 << 14),
- IDE_TFLAG_WRITE = (1 << 15),
- IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16),
- IDE_TFLAG_IN_DATA = (1 << 17),
- IDE_TFLAG_CUSTOM_HANDLER = (1 << 18),
- IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19),
- IDE_TFLAG_IN_HOB_FEATURE = (1 << 20),
- IDE_TFLAG_IN_HOB_NSECT = (1 << 21),
- IDE_TFLAG_IN_HOB_LBAL = (1 << 22),
- IDE_TFLAG_IN_HOB_LBAM = (1 << 23),
- IDE_TFLAG_IN_HOB_LBAH = (1 << 24),
- IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
- IDE_TFLAG_IN_HOB_LBAM |
- IDE_TFLAG_IN_HOB_LBAH,
- IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE |
- IDE_TFLAG_IN_HOB_NSECT |
- IDE_TFLAG_IN_HOB_LBA,
- IDE_TFLAG_IN_FEATURE = (1 << 1),
- IDE_TFLAG_IN_NSECT = (1 << 25),
- IDE_TFLAG_IN_LBAL = (1 << 26),
- IDE_TFLAG_IN_LBAM = (1 << 27),
- IDE_TFLAG_IN_LBAH = (1 << 28),
- IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL |
- IDE_TFLAG_IN_LBAM |
- IDE_TFLAG_IN_LBAH,
- IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT |
- IDE_TFLAG_IN_LBA,
- IDE_TFLAG_IN_DEVICE = (1 << 29),
- IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB |
- IDE_TFLAG_IN_HOB,
- IDE_TFLAG_TF = IDE_TFLAG_OUT_TF |
- IDE_TFLAG_IN_TF,
- IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE |
- IDE_TFLAG_IN_DEVICE,
+ IDE_TFLAG_WRITE = (1 << 1),
+ IDE_TFLAG_CUSTOM_HANDLER = (1 << 2),
+ IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
/* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = (1 << 30),
- /* ide_task_t was allocated using kmalloc() */
- IDE_TFLAG_DYN = (1 << 31),
+ IDE_TFLAG_IO_16BIT = (1 << 4),
+ /* struct ide_cmd was allocated using kmalloc() */
+ IDE_TFLAG_DYN = (1 << 5),
+ IDE_TFLAG_FS = (1 << 6),
+ IDE_TFLAG_MULTI_PIO = (1 << 7),
};
-struct ide_taskfile {
- u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */
+enum {
+ IDE_FTFLAG_FLAGGED = (1 << 0),
+ IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
+ IDE_FTFLAG_OUT_DATA = (1 << 2),
+ IDE_FTFLAG_IN_DATA = (1 << 3),
+};
- u8 hob_feature; /* 1-5: additional data to support LBA48 */
- u8 hob_nsect;
- u8 hob_lbal;
- u8 hob_lbam;
- u8 hob_lbah;
+struct ide_taskfile {
+ u8 data; /* 0: data byte (for TASKFILE ioctl) */
+ union { /* 1: */
+ u8 error; /* read: error */
+ u8 feature; /* write: feature */
+ };
+ u8 nsect; /* 2: number of sectors */
+ u8 lbal; /* 3: LBA low */
+ u8 lbam; /* 4: LBA mid */
+ u8 lbah; /* 5: LBA high */
+ u8 device; /* 6: device select */
+ union { /* 7: */
+ u8 status; /* read: status */
+ u8 command; /* write: command */
+ };
+};
- u8 data; /* 6: low data byte (for TASKFILE IOCTL) */
+struct ide_cmd {
+ struct ide_taskfile tf;
+ struct ide_taskfile hob;
+ struct {
+ struct {
+ u8 tf;
+ u8 hob;
+ } out, in;
+ } valid;
- union { /*  7: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
+ u8 tf_flags;
+ u8 ftf_flags; /* for TASKFILE ioctl */
+ int protocol;
- u8 nsect; /* 8: number of sectors */
- u8 lbal; /* 9: LBA low */
- u8 lbam; /* 10: LBA mid */
- u8 lbah; /* 11: LBA high */
+ int sg_nents; /* number of sg entries */
+ int orig_sg_nents;
+ int sg_dma_direction; /* DMA transfer direction */
- u8 device; /* 12: device select */
+ unsigned int nbytes;
+ unsigned int nleft;
+ unsigned int last_xfer_len;
- union { /* 13: */
- u8 status; /*  read: status  */
- u8 command; /* write: command */
- };
-};
+ struct scatterlist *cursg;
+ unsigned int cursg_ofs;
-typedef struct ide_task_s {
- union {
- struct ide_taskfile tf;
- u8 tf_array[14];
- };
- u32 tf_flags;
- int data_phase;
struct request *rq; /* copy of request */
- void *special; /* valid_t generally */
-} ide_task_t;
+};
/* ATAPI packet command flags */
enum {
@@ -380,15 +322,8 @@ enum {
PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
PC_FLAG_DMA_ERROR = (1 << 5),
PC_FLAG_WRITING = (1 << 6),
- /* command timed out */
- PC_FLAG_TIMEDOUT = (1 << 7),
};
-/*
- * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
- * This is used for several packet commands (not for READ/WRITE commands).
- */
-#define IDE_PC_BUFFER_SIZE 256
#define ATAPI_WAIT_PC (60 * HZ)
struct ide_atapi_pc {
@@ -400,16 +335,6 @@ struct ide_atapi_pc {
/* bytes to transfer */
int req_xfer;
- /* bytes actually transferred */
- int xferred;
-
- /* data buffer */
- u8 *buf;
- /* current buffer position */
- u8 *cur_pos;
- int buf_size;
- /* missing/available data on the current buffer */
- int b_count;
/* the corresponding request */
struct request *rq;
@@ -420,15 +345,6 @@ struct ide_atapi_pc {
* those are more or less driver-specific and some of them are subject
* to change/removal later.
*/
- u8 pc_buf[IDE_PC_BUFFER_SIZE];
-
- /* idetape only */
- struct idetape_bh *bh;
- char *b_data;
-
- struct scatterlist *sg;
- unsigned int sg_cnt;
-
unsigned long timeout;
};
@@ -445,6 +361,7 @@ struct ide_drive_s;
struct ide_disk_ops {
int (*check)(struct ide_drive_s *, const char *);
int (*get_capacity)(struct ide_drive_s *);
+ u64 (*set_capacity)(struct ide_drive_s *, u64);
void (*setup)(struct ide_drive_s *);
void (*flush)(struct ide_drive_s *);
int (*init_media)(struct ide_drive_s *, struct gendisk *);
@@ -452,7 +369,6 @@ struct ide_disk_ops {
int);
ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
sector_t);
- int (*end_request)(struct ide_drive_s *, int, int);
int (*ioctl)(struct ide_drive_s *, struct block_device *,
fmode_t, unsigned int, unsigned long);
};
@@ -470,11 +386,6 @@ enum {
IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
/* TOC track numbers are in BCD. */
IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
- /*
- * Drive does not provide data in multiples of SECTOR_SIZE
- * when more than one interrupt is needed.
- */
- IDE_AFLAG_LIMIT_NFRAMES = (1 << 5),
/* Saved TOC information is current. */
IDE_AFLAG_TOC_VALID = (1 << 6),
/* We think that the drive door is locked. */
@@ -528,8 +439,8 @@ enum {
IDE_DFLAG_NICE1 = (1 << 5),
/* device is physically present */
IDE_DFLAG_PRESENT = (1 << 6),
- /* device ejected hint */
- IDE_DFLAG_DEAD = (1 << 7),
+ /* disable Host Protected Area */
+ IDE_DFLAG_NOHPA = (1 << 7),
/* id read from device (synthetic if not set) */
IDE_DFLAG_ID_READ = (1 << 8),
IDE_DFLAG_NOPROBE = (1 << 9),
@@ -568,6 +479,7 @@ enum {
/* write protect */
IDE_DFLAG_WP = (1 << 29),
IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
+ IDE_DFLAG_NIEN_QUIRK = (1 << 31),
};
struct ide_drive_s {
@@ -592,14 +504,13 @@ struct ide_drive_s {
unsigned long sleep; /* sleep until this time */
unsigned long timeout; /* max time to wait for irq */
- special_t special; /* special action flags */
+ u8 special_flags; /* special action flags */
u8 select; /* basic drive/head select reg value */
u8 retry_pio; /* retrying dma capable host in pio */
u8 waiting_for_dma; /* dma currently in progress */
u8 dma; /* atapi dma flag */
- u8 quirk_list; /* considered quirky, set for a specific host */
u8 init_speed; /* transfer rate set at boot */
u8 current_speed; /* current transfer rate set */
u8 desired_speed; /* desired transfer rate set */
@@ -621,11 +532,10 @@ struct ide_drive_s {
unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
unsigned int cyl; /* "real" number of cyls */
- unsigned int drive_data; /* used by set_pio_mode/selectproc */
+ void *drive_data; /* used by set_pio_mode/dev_select() */
unsigned int failures; /* current failure count */
unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
-
+ u64 probed_capacity;/* initial/native media capacity */
u64 capacity64; /* total number of sectors */
int lun; /* logical unit */
@@ -643,19 +553,22 @@ struct ide_drive_s {
/* current packet command */
struct ide_atapi_pc *pc;
- /* callback for packet commands */
- void (*pc_callback)(struct ide_drive_s *, int);
+ /* last failed packet command */
+ struct ide_atapi_pc *failed_pc;
- void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
- int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
- unsigned int, int);
+ /* callback for packet commands */
+ int (*pc_callback)(struct ide_drive_s *, int);
ide_startstop_t (*irq_handler)(struct ide_drive_s *);
unsigned long atapi_flags;
struct ide_atapi_pc request_sense_pc;
- struct request request_sense_rq;
+
+ /* current sense rq and buffer */
+ bool sense_rq_armed;
+ struct request sense_rq;
+ struct request_sense sense_data;
};
typedef struct ide_drive_s ide_drive_t;
@@ -674,16 +587,16 @@ struct ide_tp_ops {
void (*exec_command)(struct hwif_s *, u8);
u8 (*read_status)(struct hwif_s *);
u8 (*read_altstatus)(struct hwif_s *);
+ void (*write_devctl)(struct hwif_s *, u8);
- void (*set_irq)(struct hwif_s *, int);
-
- void (*tf_load)(ide_drive_t *, struct ide_task_s *);
- void (*tf_read)(ide_drive_t *, struct ide_task_s *);
+ void (*dev_select)(ide_drive_t *);
+ void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
+ void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*input_data)(ide_drive_t *, struct request *, void *,
- unsigned int);
- void (*output_data)(ide_drive_t *, struct request *, void *,
- unsigned int);
+ void (*input_data)(ide_drive_t *, struct ide_cmd *,
+ void *, unsigned int);
+ void (*output_data)(ide_drive_t *, struct ide_cmd *,
+ void *, unsigned int);
};
extern const struct ide_tp_ops default_tp_ops;
@@ -694,7 +607,6 @@ extern const struct ide_tp_ops default_tp_ops;
* @init_dev: host specific initialization of a device
* @set_pio_mode: routine to program host for PIO mode
* @set_dma_mode: routine to program host for DMA mode
- * @selectproc: tweaks hardware to select drive
* @reset_poll: chipset polling based on hba specifics
* @pre_reset: chipset specific changes to default for device-hba resets
* @resetproc: routine to reset controller after a disk reset
@@ -711,13 +623,13 @@ struct ide_port_ops {
void (*init_dev)(ide_drive_t *);
void (*set_pio_mode)(ide_drive_t *, const u8);
void (*set_dma_mode)(ide_drive_t *, const u8);
- void (*selectproc)(ide_drive_t *);
int (*reset_poll)(ide_drive_t *);
void (*pre_reset)(ide_drive_t *);
void (*resetproc)(ide_drive_t *);
void (*maskproc)(ide_drive_t *, int);
void (*quirkproc)(ide_drive_t *);
void (*clear_irq)(ide_drive_t *);
+ int (*test_irq)(struct hwif_s *);
u8 (*mdma_filter)(ide_drive_t *);
u8 (*udma_filter)(ide_drive_t *);
@@ -727,13 +639,15 @@ struct ide_port_ops {
struct ide_dma_ops {
void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *);
- void (*dma_exec_cmd)(struct ide_drive_s *, u8);
+ int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
void (*dma_start)(struct ide_drive_s *);
int (*dma_end)(struct ide_drive_s *);
int (*dma_test_irq)(struct ide_drive_s *);
void (*dma_lost_irq)(struct ide_drive_s *);
- void (*dma_timeout)(struct ide_drive_s *);
+ /* the methods below are optional */
+ int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
+ int (*dma_timer_expiry)(struct ide_drive_s *);
+ void (*dma_clear)(struct ide_drive_s *);
/*
* The following method is optional and only required to be
* implemented for the SFF-8038i compatible controllers.
@@ -741,6 +655,10 @@ struct ide_dma_ops {
u8 (*dma_sff_read_status)(struct hwif_s *);
};
+enum {
+ IDE_PFLAG_PROBING = (1 << 0),
+};
+
struct ide_host;
typedef struct hwif_s {
@@ -757,6 +675,8 @@ typedef struct hwif_s {
ide_drive_t *devices[MAX_DRIVES + 1];
+ unsigned long port_flags;
+
u8 major; /* our major number */
u8 index; /* 0 for ide0; 1 for ide1; ... */
u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
@@ -775,8 +695,6 @@ typedef struct hwif_s {
struct device *dev;
- ide_ack_intr_t *ack_intr;
-
void (*rw_disk)(ide_drive_t *, struct request *);
const struct ide_tp_ops *tp_ops;
@@ -796,19 +714,8 @@ typedef struct hwif_s {
/* Scatter-gather list used to build the above */
struct scatterlist *sg_table;
int sg_max_nents; /* Maximum number of entries in it */
- int sg_nents; /* Current number of entries in it */
- int orig_sg_nents;
- int sg_dma_direction; /* dma transfer direction */
-
- /* data phase of the active command (currently only valid for PIO/DMA) */
- int data_phase;
-
- struct ide_task_s task; /* current command */
- unsigned int nsect;
- unsigned int nleft;
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
+ struct ide_cmd cmd; /* current command */
int rqsize; /* max sectors per request */
int irq; /* our irq number */
@@ -866,9 +773,18 @@ struct ide_host {
ide_hwif_t *ports[MAX_HOST_PORTS + 1];
unsigned int n_ports;
struct device *dev[2];
- unsigned int (*init_chipset)(struct pci_dev *);
+
+ int (*init_chipset)(struct pci_dev *);
+
+ void (*get_lock)(irq_handler_t, void *);
+ void (*release_lock)(void);
+
irq_handler_t irq_handler;
+
unsigned long host_flags;
+
+ int irq_flags;
+
void *host_priv;
ide_hwif_t *cur_port; /* for hosts requiring serialization */
@@ -885,7 +801,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
typedef int (ide_expiry_t)(ide_drive_t *);
/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned);
+typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
extern struct mutex ide_setting_mtx;
@@ -1061,10 +977,11 @@ enum {
};
/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": " fmt, ## args); \
+#define __ide_debug_log(lvl, fmt, args...) \
+{ \
+ if (unlikely(drive->debug_mask & lvl)) \
+ printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
+ __func__, ## args); \
}
/*
@@ -1103,7 +1020,7 @@ int generic_ide_resume(struct device *);
void ide_complete_power_step(ide_drive_t *, struct request *);
ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_request(ide_drive_t *, struct request *);
+void ide_complete_pm_rq(ide_drive_t *, struct request *);
void ide_check_pm_state(ide_drive_t *, struct request *);
/*
@@ -1115,7 +1032,6 @@ void ide_check_pm_state(ide_drive_t *, struct request *);
struct ide_driver {
const char *version;
ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- int (*end_request)(ide_drive_t *, int, int);
struct device_driver gen_driver;
int (*probe)(ide_drive_t *);
void (*remove)(ide_drive_t *);
@@ -1146,16 +1062,15 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
extern int ide_vlb_clk;
extern int ide_pci_clk;
-extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
-int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
- int uptodate, int nr_sectors);
+unsigned int ide_rq_bytes(struct request *);
+int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
+void ide_kill_rq(ide_drive_t *, struct request *);
-extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry);
+void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
+void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
- ide_expiry_t *);
-
-void ide_execute_pkt_cmd(ide_drive_t *);
+void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
+ unsigned int);
void ide_pad_transfer(ide_drive_t *, int, int);
@@ -1165,44 +1080,42 @@ void ide_fix_driveid(u16 *);
extern void ide_fixstring(u8 *, const int, const int);
-int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
+int ide_busy_sleep(ide_drive_t *, unsigned long, int);
int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
+ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
+ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
+
extern ide_startstop_t ide_do_reset (ide_drive_t *);
extern int ide_devset_execute(ide_drive_t *drive,
const struct ide_devset *setting, int arg);
-extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
-
-extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
+void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
+int ide_complete_rq(ide_drive_t *, int, unsigned int);
-void ide_tf_dump(const char *, struct ide_taskfile *);
+void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
+void ide_tf_dump(const char *, struct ide_cmd *);
void ide_exec_command(ide_hwif_t *, u8);
u8 ide_read_status(ide_hwif_t *);
u8 ide_read_altstatus(ide_hwif_t *);
+void ide_write_devctl(ide_hwif_t *, u8);
-void ide_set_irq(ide_hwif_t *, int);
+void ide_dev_select(ide_drive_t *);
+void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
+void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_load(ide_drive_t *, ide_task_t *);
-void ide_tf_read(ide_drive_t *, ide_task_t *);
+void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
+void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
-
-int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
-
-extern void SELECT_DRIVE(ide_drive_t *);
void SELECT_MASK(ide_drive_t *, int);
u8 ide_read_error(ide_drive_t *);
void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-extern int drive_is_ready(ide_drive_t *);
-
-void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
+int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
int ide_check_atapi_device(ide_drive_t *, const char *);
@@ -1228,28 +1141,36 @@ enum {
REQ_IDETAPE_WRITE = (1 << 3),
};
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *);
+int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
+ void *, unsigned int);
int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *, struct gendisk *);
+void ide_retry_pc(ide_drive_t *drive);
+
+void ide_prep_sense(ide_drive_t *drive, struct request *rq);
+int ide_queue_sense_rq(ide_drive_t *drive, void *special);
int ide_cd_expiry(ide_drive_t *);
int ide_cd_get_xferlen(struct request *);
-ide_startstop_t ide_issue_pc(ide_drive_t *);
+ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
+
+ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
+
+void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
+void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-void task_end_request(ide_drive_t *, struct request *, u8);
+int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
+int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, ide_task_t *);
+int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
+int ide_dev_read_id(ide_drive_t *, u8, u16 *);
extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
@@ -1280,8 +1201,8 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
return 0;
}
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
- hw_regs_t *, hw_regs_t **);
+void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
+ struct ide_hw *, struct ide_hw **);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -1349,10 +1270,10 @@ enum {
IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
/* serialize ports */
IDE_HFLAG_SERIALIZE = (1 << 20),
- /* use legacy IRQs */
- IDE_HFLAG_LEGACY_IRQS = (1 << 21),
- /* force use of legacy IRQs */
- IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
+ /* host is DTC2278 */
+ IDE_HFLAG_DTC2278 = (1 << 21),
+ /* 4 devices on a single set of I/O ports */
+ IDE_HFLAG_4DRIVES = (1 << 22),
/* host is TRM290 */
IDE_HFLAG_TRM290 = (1 << 23),
/* use 32-bit I/O ops */
@@ -1380,7 +1301,12 @@ enum {
struct ide_port_info {
char *name;
- unsigned int (*init_chipset)(struct pci_dev *);
+
+ int (*init_chipset)(struct pci_dev *);
+
+ void (*get_lock)(irq_handler_t, void *);
+ void (*release_lock)(void);
+
void (*init_iops)(ide_hwif_t *);
void (*init_hwif)(ide_hwif_t *);
int (*init_dma)(ide_hwif_t *,
@@ -1397,6 +1323,9 @@ struct ide_port_info {
u16 max_sectors; /* if < than the default one */
u32 host_flags;
+
+ int irq_flags;
+
u8 pio_mask;
u8 swdma_mask;
u8 mwdma_mask;
@@ -1416,8 +1345,8 @@ int ide_pci_resume(struct pci_dev *);
#define ide_pci_resume NULL
#endif
-void ide_map_sg(ide_drive_t *, struct request *);
-void ide_init_sg_cmd(ide_drive_t *, struct request *);
+void ide_map_sg(ide_drive_t *, struct ide_cmd *);
+void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
#define BAD_DMA_DRIVE 0
#define GOOD_DMA_DRIVE 1
@@ -1451,18 +1380,18 @@ ide_startstop_t ide_dma_intr(ide_drive_t *);
int ide_allocate_dma_engine(ide_hwif_t *);
void ide_release_dma_engine(ide_hwif_t *);
-int ide_build_sglist(ide_drive_t *, struct request *);
-void ide_destroy_dmatable(ide_drive_t *);
+int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
+void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
int config_drive_for_dma(ide_drive_t *);
-extern int ide_build_dmatable(ide_drive_t *, struct request *);
+int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
void ide_dma_host_set(ide_drive_t *, int);
-extern int ide_dma_setup(ide_drive_t *);
-void ide_dma_exec_cmd(ide_drive_t *, u8);
+int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
extern void ide_dma_start(ide_drive_t *);
int ide_dma_end(ide_drive_t *);
int ide_dma_test_irq(ide_drive_t *);
+int ide_dma_sff_timer_expiry(ide_drive_t *);
u8 ide_dma_sff_read_status(ide_hwif_t *);
extern const struct ide_dma_ops sff_dma_ops;
#else
@@ -1470,7 +1399,7 @@ static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
void ide_dma_lost_irq(ide_drive_t *);
-void ide_dma_timeout(ide_drive_t *);
+ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
#else
static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
@@ -1482,21 +1411,29 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; }
static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
+static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
+static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
+static inline int ide_dma_prepare(ide_drive_t *drive,
+ struct ide_cmd *cmd) { return 1; }
+static inline void ide_dma_unmap_sg(ide_drive_t *drive,
+ struct ide_cmd *cmd) { ; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
#ifdef CONFIG_BLK_DEV_IDEACPI
+int ide_acpi_init(void);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-extern void ide_acpi_init(ide_hwif_t *hwif);
+void ide_acpi_init_port(ide_hwif_t *);
void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
+static inline int ide_acpi_init(void) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
#endif
@@ -1504,16 +1441,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);
+void ide_check_nien_quirk_list(ide_drive_t *);
void ide_undecoded_slave(ide_drive_t *);
void ide_port_apply_params(ide_hwif_t *);
int ide_sysfs_register_port(ide_hwif_t *);
-struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **);
+struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
+ unsigned int);
void ide_host_free(struct ide_host *);
int ide_host_register(struct ide_host *, const struct ide_port_info *,
- hw_regs_t **);
-int ide_host_add(const struct ide_port_info *, hw_regs_t **,
+ struct ide_hw **);
+int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
struct ide_host **);
void ide_host_remove(struct ide_host *);
int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
@@ -1530,11 +1469,9 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
hwif->hwif_data = data;
}
-const char *ide_xfer_verbose(u8 mode);
extern void ide_toggle_bounce(ide_drive_t *drive, int on);
-extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
-u64 ide_get_lba_addr(struct ide_taskfile *, int);
+u64 ide_get_lba_addr(struct ide_cmd *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);
struct ide_timing {
@@ -1571,14 +1508,19 @@ void ide_timing_merge(struct ide_timing *, struct ide_timing *,
struct ide_timing *, unsigned int);
int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
+#ifdef CONFIG_IDE_XFER_MODE
int ide_scan_pio_blacklist(char *);
-
+const char *ide_xfer_verbose(u8);
u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
-
+int ide_pio_need_iordy(ide_drive_t *, const u8);
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
-
void ide_set_pio(ide_drive_t *, u8);
+int ide_set_xfer_rate(ide_drive_t *, u8);
+#else
+static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
+static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
+#endif
static inline void ide_set_max_pio(ide_drive_t *drive)
{
@@ -1608,9 +1550,23 @@ static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
}
+static inline void *ide_get_drivedata(ide_drive_t *drive)
+{
+ return drive->drive_data;
+}
+
+static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
+{
+ drive->drive_data = data;
+}
+
#define ide_port_for_each_dev(i, dev, port) \
for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
+#define ide_port_for_each_present_dev(i, dev, port) \
+ for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
+ if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
+
#define ide_host_for_each_port(i, port, host) \
for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
diff --git a/include/linux/idr.h b/include/linux/idr.h
index dd846df8cd3..e968db71e33 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
+void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_remove_all(struct idr *idp);
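The idr_get_next() declaration added above returns the pointer stored at the smallest ID greater than or equal to *nextid and writes that ID back through nextid, so callers can walk an idr without the idr_for_each() callback. A minimal iteration sketch (my_obj and my_obj_process() are hypothetical, not part of this header):

/* Walk every object in an idr without a callback (sketch). */
static void my_idr_scan(struct idr *idr)
{
	struct my_obj *obj;		/* hypothetical payload type */
	int id = 0;

	while ((obj = idr_get_next(idr, &id)) != NULL) {
		/* obj is the entry that was registered under 'id' */
		my_obj_process(obj);	/* hypothetical per-entry work */
		id++;			/* continue the scan past this ID */
	}
}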
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c4e6ca1a630..a9173d5434d 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -18,6 +18,22 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+/*
+ * DS bit usage
+ *
+ * TA = transmitter address
+ * RA = receiver address
+ * DA = destination address
+ * SA = source address
+ *
+ * ToDS  FromDS  A1(RA)  A2(TA)  A3     A4     Use
+ * -----------------------------------------------------------------
+ *  0      0     DA      SA      BSSID  -      IBSS/DLS
+ *  0      1     DA      BSSID   SA     -      AP -> STA
+ *  1      0     BSSID   SA      DA     -      AP <- STA
+ *  1      1     RA      TA      DA     SA     unspecified (WDS)
+ */
+
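As an illustration, the table above maps directly onto a small classifier; this is only a sketch and assumes the ieee80211_has_tods()/ieee80211_has_fromds() helpers defined elsewhere in this header:

/*
 * Classify a data frame's direction from the ToDS/FromDS bits (sketch).
 * Assumes ieee80211_has_tods()/ieee80211_has_fromds() from this header.
 */
static const char *wlan_frame_direction(const struct ieee80211_hdr *hdr)
{
	int to_ds = ieee80211_has_tods(hdr->frame_control);
	int from_ds = ieee80211_has_fromds(hdr->frame_control);

	if (!to_ds && !from_ds)
		return "IBSS/DLS";		/* A3 carries the BSSID */
	if (!to_ds && from_ds)
		return "AP -> STA";		/* A3 carries the SA */
	if (to_ds && !from_ds)
		return "AP <- STA";		/* A3 carries the DA */
	return "unspecified (WDS)";		/* A4 carries the SA */
}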
#define FCS_LEN 4
#define IEEE80211_FCTL_VERS 0x0003
@@ -477,6 +493,7 @@ struct ieee80211s_hdr {
/* Mesh flags */
#define MESH_FLAGS_AE_A4 0x1
#define MESH_FLAGS_AE_A5_A6 0x2
+#define MESH_FLAGS_AE 0x3
#define MESH_FLAGS_PS_DEEP 0x4
/**
@@ -524,9 +541,11 @@ struct ieee80211_tim_ie {
u8 dtim_period;
u8 bitmap_ctrl;
/* variable size: 1 - 251 bytes */
- u8 virtual_map[0];
+ u8 virtual_map[1];
} __attribute__ ((packed));
+#define WLAN_SA_QUERY_TR_ID_LEN 2
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -646,6 +665,10 @@ struct ieee80211_mgmt {
u8 action_code;
u8 variable[0];
} __attribute__((packed)) mesh_action;
+ struct {
+ u8 action;
+ u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
+ } __attribute__ ((packed)) sa_query;
} u;
} __attribute__ ((packed)) action;
} u;
@@ -655,6 +678,15 @@ struct ieee80211_mgmt {
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
+/* Management MIC information element (IEEE 802.11w) */
+struct ieee80211_mmie {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[8];
+} __attribute__ ((packed));
+
/* Control frames */
struct ieee80211_rts {
__le16 frame_control;
@@ -836,6 +868,7 @@ struct ieee80211_ht_info {
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
+#define WLAN_AUTH_FT 2
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -899,6 +932,9 @@ enum ieee80211_statuscode {
/* 802.11g */
WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
+ /* 802.11w */
+ WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30,
+ WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31,
/* 802.11i */
WLAN_STATUS_INVALID_IE = 40,
WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
@@ -1018,6 +1054,8 @@ enum ieee80211_eid {
WLAN_EID_HT_INFORMATION = 61,
/* 802.11i */
WLAN_EID_RSN = 48,
+ WLAN_EID_TIMEOUT_INTERVAL = 56,
+ WLAN_EID_MMIE = 76 /* 802.11w */,
WLAN_EID_WPA = 221,
WLAN_EID_GENERIC = 221,
WLAN_EID_VENDOR_SPECIFIC = 221,
@@ -1030,7 +1068,13 @@ enum ieee80211_category {
WLAN_CATEGORY_QOS = 1,
WLAN_CATEGORY_DLS = 2,
WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_PUBLIC = 4,
+ WLAN_CATEGORY_HT = 7,
+ WLAN_CATEGORY_SA_QUERY = 8,
+ WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
WLAN_CATEGORY_WMM = 17,
+ WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
+ WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
/* SPECTRUM_MGMT action code */
@@ -1042,6 +1086,15 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
+/* Security key length */
+enum ieee80211_key_len {
+ WLAN_KEY_LEN_WEP40 = 5,
+ WLAN_KEY_LEN_WEP104 = 13,
+ WLAN_KEY_LEN_CCMP = 16,
+ WLAN_KEY_LEN_TKIP = 32,
+ WLAN_KEY_LEN_AES_CMAC = 16,
+};
+
/*
* IEEE 802.11-2007 7.3.2.9 Country information element
*
@@ -1104,6 +1157,12 @@ struct ieee80211_country_ie_triplet {
};
} __attribute__ ((packed));
+enum ieee80211_timeout_interval_type {
+ WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */,
+ WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */,
+ WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */,
+};
+
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -1118,6 +1177,13 @@ enum ieee80211_back_parties {
WLAN_BACK_TIMER = 2,
};
+/* SA Query action */
+enum ieee80211_sa_query_action {
+ WLAN_ACTION_SA_QUERY_REQUEST = 0,
+ WLAN_ACTION_SA_QUERY_RESPONSE = 1,
+};
+
+
/* A-MSDU 802.11n */
#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080
@@ -1128,6 +1194,7 @@ enum ieee80211_back_parties {
/* reserved: 0x000FAC03 */
#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
+#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
#define WLAN_MAX_KEY_LEN 32
@@ -1185,4 +1252,190 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
return hdr->addr1;
}
+/**
+ * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
+ * @hdr: the frame (buffer must include at least the first octet of payload)
+ */
+static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_disassoc(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control))
+ return true;
+
+ if (ieee80211_is_action(hdr->frame_control)) {
+ u8 *category;
+
+ /*
+ * Action frames, excluding Public Action frames, are Robust
+ * Management Frames. However, if we are looking at a Protected
+ * frame, skip the check since the data may be encrypted and
+ * the frame has already been found to be a Robust Management
+ * Frame (by the other end).
+ */
+ if (ieee80211_has_protected(hdr->frame_control))
+ return true;
+ category = ((u8 *) hdr) + 24;
+ return *category != WLAN_CATEGORY_PUBLIC &&
+ *category != WLAN_CATEGORY_HT &&
+ *category != WLAN_CATEGORY_VENDOR_SPECIFIC;
+ }
+
+ return false;
+}
+
+/**
+ * ieee80211_fhss_chan_to_freq - get channel frequency
+ * @channel: the FHSS channel
+ *
+ * Convert IEEE802.11 FHSS channel to frequency (MHz)
+ * Ref IEEE 802.11-2007 section 14.6
+ */
+static inline int ieee80211_fhss_chan_to_freq(int channel)
+{
+ if ((channel > 1) && (channel < 96))
+ return channel + 2400;
+ else
+ return -1;
+}
+
+/**
+ * ieee80211_freq_to_fhss_chan - get channel
+ * @freq: the channel's frequency
+ *
+ * Convert frequency (MHz) to IEEE802.11 FHSS channel
+ * Ref IEEE 802.11-2007 section 14.6
+ */
+static inline int ieee80211_freq_to_fhss_chan(int freq)
+{
+ if ((freq > 2401) && (freq < 2496))
+ return freq - 2400;
+ else
+ return -1;
+}
+
+/**
+ * ieee80211_dsss_chan_to_freq - get channel center frequency
+ * @channel: the DSSS channel
+ *
+ * Convert IEEE802.11 DSSS channel to the center frequency (MHz).
+ * Ref IEEE 802.11-2007 section 15.6
+ */
+static inline int ieee80211_dsss_chan_to_freq(int channel)
+{
+ if ((channel > 0) && (channel < 14))
+ return 2407 + (channel * 5);
+ else if (channel == 14)
+ return 2484;
+ else
+ return -1;
+}
+
+/**
+ * ieee80211_freq_to_dsss_chan - get channel
+ * @freq: the frequency
+ *
+ * Convert frequency (MHz) to IEEE802.11 DSSS channel
+ * Ref IEEE 802.11-2007 section 15.6
+ *
+ * This routine selects the channel with the closest center frequency.
+ */
+static inline int ieee80211_freq_to_dsss_chan(int freq)
+{
+ if ((freq >= 2410) && (freq < 2475))
+ return (freq - 2405) / 5;
+ else if ((freq >= 2482) && (freq < 2487))
+ return 14;
+ else
+ return -1;
+}
+
+/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back
+ * Ref IEEE 802.11-2007 section 18.4.6.2
+ *
+ * The channels and frequencies are the same as those defined for DSSS
+ */
+#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan)
+#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq)
+
+/* Convert IEEE802.11 ERP channel to frequency (MHz) and back
+ * Ref IEEE 802.11-2007 section 19.4.2
+ */
+#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan)
+#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq)
+
+/**
+ * ieee80211_ofdm_chan_to_freq - get channel center frequency
+ * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
+ * @channel: the OFDM channel
+ *
+ * Convert IEEE802.11 OFDM channel to center frequency (MHz)
+ * Ref IEEE 802.11-2007 section 17.3.8.3.2
+ */
+static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel)
+{
+ if ((channel > 0) && (channel <= 200) &&
+ (s_freq >= 4000))
+ return s_freq + (channel * 5);
+ else
+ return -1;
+}
+
+/**
+ * ieee80211_freq_to_ofdm_chan - get channel
+ * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz
+ * @freq: the frequency
+ *
+ * Convert frequency (MHz) to IEEE802.11 OFDM channel
+ * Ref IEEE 802.11-2007 section 17.3.8.3.2
+ *
+ * This routine selects the channel with the closest center frequency.
+ */
+static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq)
+{
+ if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) &&
+ (s_freq >= 4000))
+ return (freq + 2 - s_freq) / 5;
+ else
+ return -1;
+}
+
+/**
+ * ieee80211_tu_to_usec - convert time units (TU) to microseconds
+ * @tu: the TUs
+ */
+static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
+{
+ return 1024 * tu;
+}
+
+/**
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ */
+static inline bool ieee80211_check_tim(struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ u8 mask;
+ u8 index, indexn1, indexn2;
+
+ if (unlikely(!tim || tim_len < sizeof(*tim)))
+ return false;
+
+ aid &= 0x3fff;
+ index = aid / 8;
+ mask = 1 << (aid & 7);
+
+ indexn1 = tim->bitmap_ctrl & 0xfe;
+ indexn2 = tim_len + indexn1 - 4;
+
+ if (index < indexn1 || index > indexn2)
+ return false;
+
+ index -= indexn1;
+
+ return !!(tim->virtual_map[index] & mask);
+}
+
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/if.h b/include/linux/if.h
index 2a6e29620a9..b9a6229f3be 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -66,6 +66,10 @@
#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
+#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */
+#define IFF_XMIT_DST_RELEASE 0x400 /* dev_hard_start_xmit() is allowed to
+ * release skb->dst
+ */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_arcnet.h b/include/linux/if_arcnet.h
index 27ea2ac445a..0835debab11 100644
--- a/include/linux/if_arcnet.h
+++ b/include/linux/if_arcnet.h
@@ -16,6 +16,7 @@
#ifndef _LINUX_IF_ARCNET_H
#define _LINUX_IF_ARCNET_H
+#include <linux/types.h>
#include <linux/if_ether.h>
@@ -57,10 +58,10 @@
*/
struct arc_rfc1201
{
- uint8_t proto; /* protocol ID field - varies */
- uint8_t split_flag; /* for use with split packets */
+ __u8 proto; /* protocol ID field - varies */
+ __u8 split_flag; /* for use with split packets */
__be16 sequence; /* sequence number */
- uint8_t payload[0]; /* space remaining in packet (504 bytes)*/
+ __u8 payload[0]; /* space remaining in packet (504 bytes)*/
};
#define RFC1201_HDR_SIZE 4
@@ -70,8 +71,8 @@ struct arc_rfc1201
*/
struct arc_rfc1051
{
- uint8_t proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
- uint8_t payload[0]; /* 507 bytes */
+ __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */
+ __u8 payload[0]; /* 507 bytes */
};
#define RFC1051_HDR_SIZE 1
@@ -82,20 +83,20 @@ struct arc_rfc1051
*/
struct arc_eth_encap
{
- uint8_t proto; /* Always ARC_P_ETHER */
+ __u8 proto; /* Always ARC_P_ETHER */
struct ethhdr eth; /* standard ethernet header (yuck!) */
- uint8_t payload[0]; /* 493 bytes */
+ __u8 payload[0]; /* 493 bytes */
};
#define ETH_ENCAP_HDR_SIZE 14
struct arc_cap
{
- uint8_t proto;
- uint8_t cookie[sizeof(int)]; /* Actually NOT sent over the network */
+ __u8 proto;
+ __u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */
union {
- uint8_t ack;
- uint8_t raw[0]; /* 507 bytes */
+ __u8 ack;
+ __u8 raw[0]; /* 507 bytes */
} mes;
};
@@ -109,7 +110,7 @@ struct arc_cap
*/
struct arc_hardware
{
- uint8_t source, /* source ARCnet - filled in automagically */
+ __u8 source, /* source ARCnet - filled in automagically */
dest, /* destination ARCnet - 0 for broadcast */
offset[2]; /* offset bytes (some weird semantics) */
};
@@ -130,7 +131,7 @@ struct archdr
struct arc_rfc1051 rfc1051;
struct arc_eth_encap eth_encap;
struct arc_cap cap;
- uint8_t raw[0]; /* 508 bytes */
+ __u8 raw[0]; /* 508 bytes */
} soft;
};
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 5ff89809a58..b554300ef8b 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -86,6 +86,8 @@
#define ARPHRD_IEEE80211 801 /* IEEE 802.11 */
#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */
#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
+#define ARPHRD_IEEE802154 804
+#define ARPHRD_IEEE802154_PHY 805
#define ARPHRD_PHONET 820 /* PhoNet media type */
#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 7f3c735f422..ae3a1871413 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -17,7 +17,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
+
#ifndef _LINUX_IF_ETHER_H
#define _LINUX_IF_ETHER_H
@@ -25,7 +25,7 @@
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
- * and FCS/CRC (frame check sequence).
+ * and FCS/CRC (frame check sequence).
*/
#define ETH_ALEN 6 /* Octets in one ethernet addr */
@@ -78,12 +78,14 @@
#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_TIPC 0x88CA /* TIPC */
+#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
+#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
/*
* Non DIX types. Won't clash for 1500 types.
*/
-
+
#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
@@ -105,11 +107,12 @@
#define ETH_P_DSA 0x001B /* Distributed Switch Arch. */
#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */
#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
+#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
/*
* This is an Ethernet frame header.
*/
-
+
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 60e16a551dd..673f2209453 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -153,7 +153,6 @@ struct frhdr
struct dlci_local
{
- struct net_device_stats stats;
struct net_device *master;
struct net_device *slave;
struct dlci_conf config;
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 18db0668065..dea7d6b7cf9 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -46,6 +46,8 @@ struct sockaddr_ll
#define PACKET_VERSION 10
#define PACKET_HDRLEN 11
#define PACKET_RESERVE 12
+#define PACKET_TX_RING 13
+#define PACKET_LOSS 14
struct tpacket_stats
{
@@ -63,14 +65,22 @@ struct tpacket_auxdata
__u16 tp_vlan_tci;
};
+/* Rx ring - header status */
+#define TP_STATUS_KERNEL 0x0
+#define TP_STATUS_USER 0x1
+#define TP_STATUS_COPY 0x2
+#define TP_STATUS_LOSING 0x4
+#define TP_STATUS_CSUMNOTREADY 0x8
+
+/* Tx ring - header status */
+#define TP_STATUS_AVAILABLE 0x0
+#define TP_STATUS_SEND_REQUEST 0x1
+#define TP_STATUS_SENDING 0x2
+#define TP_STATUS_WRONG_FORMAT 0x4
+
struct tpacket_hdr
{
unsigned long tp_status;
-#define TP_STATUS_KERNEL 0
-#define TP_STATUS_USER 1
-#define TP_STATUS_COPY 2
-#define TP_STATUS_LOSING 4
-#define TP_STATUS_CSUMNOTREADY 8
unsigned int tp_len;
unsigned int tp_snaplen;
unsigned short tp_mac;
@@ -135,5 +145,6 @@ struct packet_mreq
#define PACKET_MR_MULTICAST 0
#define PACKET_MR_PROMISC 1
#define PACKET_MR_ALLMULTI 2
+#define PACKET_MR_UNICAST 3
#endif
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index c7a66882b6d..3a14b088c8e 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -26,7 +26,7 @@
*/
struct pppol2tp_addr
{
- pid_t pid; /* pid that owns the fd.
+ __kernel_pid_t pid; /* pid that owns the fd.
* 0 => current */
int fd; /* FD of UDP socket to use */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 30c88b2245f..90b5fae5d71 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -95,16 +95,16 @@ struct pppoe_tag {
} __attribute ((packed));
/* Tag identifiers */
-#define PTT_EOL __constant_htons(0x0000)
-#define PTT_SRV_NAME __constant_htons(0x0101)
-#define PTT_AC_NAME __constant_htons(0x0102)
-#define PTT_HOST_UNIQ __constant_htons(0x0103)
-#define PTT_AC_COOKIE __constant_htons(0x0104)
-#define PTT_VENDOR __constant_htons(0x0105)
-#define PTT_RELAY_SID __constant_htons(0x0110)
-#define PTT_SRV_ERR __constant_htons(0x0201)
-#define PTT_SYS_ERR __constant_htons(0x0202)
-#define PTT_GEN_ERR __constant_htons(0x0203)
+#define PTT_EOL __cpu_to_be16(0x0000)
+#define PTT_SRV_NAME __cpu_to_be16(0x0101)
+#define PTT_AC_NAME __cpu_to_be16(0x0102)
+#define PTT_HOST_UNIQ __cpu_to_be16(0x0103)
+#define PTT_AC_COOKIE __cpu_to_be16(0x0104)
+#define PTT_VENDOR __cpu_to_be16(0x0105)
+#define PTT_RELAY_SID __cpu_to_be16(0x0110)
+#define PTT_SRV_ERR __cpu_to_be16(0x0201)
+#define PTT_SYS_ERR __cpu_to_be16(0x0202)
+#define PTT_GEN_ERR __cpu_to_be16(0x0203)
struct pppoe_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 8529f57ba26..915ba5789f0 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -46,6 +46,8 @@
#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
#define TUNSETTXFILTER _IOW('T', 209, unsigned int)
#define TUNGETIFF _IOR('T', 210, unsigned int)
+#define TUNGETSNDBUF _IOR('T', 211, int)
+#define TUNSETSNDBUF _IOW('T', 212, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
@@ -53,6 +55,7 @@
#define IFF_NO_PI 0x1000
#define IFF_ONE_QUEUE 0x2000
#define IFF_VNET_HDR 0x4000
+#define IFF_TUN_EXCL 0x8000
/* Features for GSO (TUNSETOFFLOAD). */
#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 82c43624c06..5eb9b0f857e 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -16,14 +16,14 @@
#define SIOCDELPRL (SIOCDEVPRIVATE + 6)
#define SIOCCHGPRL (SIOCDEVPRIVATE + 7)
-#define GRE_CSUM __constant_htons(0x8000)
-#define GRE_ROUTING __constant_htons(0x4000)
-#define GRE_KEY __constant_htons(0x2000)
-#define GRE_SEQ __constant_htons(0x1000)
-#define GRE_STRICT __constant_htons(0x0800)
-#define GRE_REC __constant_htons(0x0700)
-#define GRE_FLAGS __constant_htons(0x00F8)
-#define GRE_VERSION __constant_htons(0x0007)
+#define GRE_CSUM __cpu_to_be16(0x8000)
+#define GRE_ROUTING __cpu_to_be16(0x4000)
+#define GRE_KEY __cpu_to_be16(0x2000)
+#define GRE_SEQ __cpu_to_be16(0x1000)
+#define GRE_STRICT __cpu_to_be16(0x0800)
+#define GRE_REC __cpu_to_be16(0x0700)
+#define GRE_FLAGS __cpu_to_be16(0x00F8)
+#define GRE_VERSION __cpu_to_be16(0x0007)
struct ip_tunnel_parm
{
@@ -44,7 +44,7 @@ struct ip_tunnel_prl {
__u16 flags;
__u16 __reserved;
__u32 datalen;
- __u32 __reserved2;
+ __u32 rs_delay;
/* data follows */
};
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index e1ff5b14310..7ff9af1d0f0 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -118,8 +118,7 @@ extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb);
extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
- unsigned int vlan_tci,
- struct napi_gro_fraginfo *info);
+ unsigned int vlan_tci);
#else
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -154,8 +153,7 @@ static inline int vlan_gro_receive(struct napi_struct *napi,
}
static inline int vlan_gro_frags(struct napi_struct *napi,
- struct vlan_group *grp, unsigned int vlan_tci,
- struct napi_gro_fraginfo *info)
+ struct vlan_group *grp, unsigned int vlan_tci)
{
return NET_RX_DROP;
}
diff --git a/include/linux/ima.h b/include/linux/ima.h
new file mode 100644
index 00000000000..b1b827d091a
--- /dev/null
+++ b/include/linux/ima.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_IMA_H
+#define _LINUX_IMA_H
+
+#include <linux/fs.h>
+struct linux_binprm;
+
+#define IMA_COUNT_UPDATE 1
+#define IMA_COUNT_LEAVE 0
+
+#ifdef CONFIG_IMA
+extern int ima_bprm_check(struct linux_binprm *bprm);
+extern int ima_inode_alloc(struct inode *inode);
+extern void ima_inode_free(struct inode *inode);
+extern int ima_path_check(struct path *path, int mask, int update_counts);
+extern void ima_file_free(struct file *file);
+extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern void ima_counts_get(struct file *file);
+
+#else
+static inline int ima_bprm_check(struct linux_binprm *bprm)
+{
+ return 0;
+}
+
+static inline int ima_inode_alloc(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void ima_inode_free(struct inode *inode)
+{
+ return;
+}
+
+static inline int ima_path_check(struct path *path, int mask, int update_counts)
+{
+ return 0;
+}
+
+static inline void ima_file_free(struct file *file)
+{
+ return;
+}
+
+static inline int ima_file_mmap(struct file *file, unsigned long prot)
+{
+ return 0;
+}
+
+static inline void ima_counts_get(struct file *file)
+{
+ return;
+}
+#endif /* CONFIG_IMA */
+#endif /* _LINUX_IMA_H */
diff --git a/include/linux/in.h b/include/linux/in.h
index d60122a3a08..cf196da04ec 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -107,6 +107,7 @@ struct in_addr {
#define MCAST_JOIN_SOURCE_GROUP 46
#define MCAST_LEAVE_SOURCE_GROUP 47
#define MCAST_MSFILTER 48
+#define IP_MULTICAST_ALL 49
#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1
diff --git a/include/linux/in6.h b/include/linux/in6.h
index bc492048c34..718bf21c575 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -44,11 +44,11 @@ struct in6_addr
* NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
* in network byte order, not in host byte order as are the IPv4 equivalents
*/
+#ifdef __KERNEL__
extern const struct in6_addr in6addr_any;
#define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
extern const struct in6_addr in6addr_loopback;
#define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
-#ifdef __KERNEL__
extern const struct in6_addr in6addr_linklocal_allnodes;
#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \
{ { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 06fcdb45106..acef2a770b6 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -108,6 +108,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
+#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
struct in_ifaddr
{
diff --git a/include/linux/init.h b/include/linux/init.h
index 68cb0265d00..13b633ed695 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -29,7 +29,7 @@
* sign followed by value, e.g.:
*
* static int init_variable __initdata = 0;
- * static char linux_logo[] __initdata = { 0x32, 0x36, ... };
+ * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
*
* Don't forget to initialize data not at file scope, i.e. within a function,
* as gcc otherwise puts the data into the bss section and not into the init
@@ -60,14 +60,6 @@
#define __refdata __section(.ref.data)
#define __refconst __section(.ref.rodata)
-/* backward compatibility note
- * A few places hardcode the old section names:
- * .text.init.refok
- * .data.init.refok
- * .exit.text.refok
- * They should be converted to use the defines from this file
- */
-
/* compatibility defines */
#define __init_refok __ref
#define __initdata_refok __refdata
@@ -142,6 +134,9 @@ typedef void (*exitcall_t)(void);
extern initcall_t __con_initcall_start[], __con_initcall_end[];
extern initcall_t __security_initcall_start[], __security_initcall_end[];
+/* Used for constructor calls. */
+typedef void (*ctor_fn_t)(void);
+
/* Defined in init/main.c */
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
@@ -231,7 +226,8 @@ struct obs_kernel_param {
* obs_kernel_param "array" too far apart in .init.setup.
*/
#define __setup_param(str, unique_id, fn, early) \
- static char __setup_str_##unique_id[] __initdata __aligned(1) = str; \
+ static const char __setup_str_##unique_id[] __initconst \
+ __aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
__used __section(.init.setup) \
__attribute__((aligned((sizeof(long))))) \
@@ -247,6 +243,7 @@ struct obs_kernel_param {
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
+void __init parse_early_options(char *cmdline);
#endif /* __ASSEMBLY__ */
/**
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e752d973fa2..5368fbdc780 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -5,6 +5,7 @@
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/lockdep.h>
+#include <linux/ftrace.h>
#include <linux/ipc.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
@@ -14,31 +15,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-#define INIT_KIOCTX(name, which_mm) \
-{ \
- .users = ATOMIC_INIT(1), \
- .dead = 0, \
- .mm = &which_mm, \
- .user_id = 0, \
- .next = NULL, \
- .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
- .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
- .reqs_active = 0U, \
- .max_reqs = ~0U, \
-}
-
-#define INIT_MM(name) \
-{ \
- .mm_rb = RB_ROOT, \
- .pgd = swapper_pg_dir, \
- .mm_users = ATOMIC_INIT(2), \
- .mm_count = ATOMIC_INIT(1), \
- .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
- .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
- .mmlist = LIST_HEAD_INIT(name.mmlist), \
- .cpu_vm_mask = CPU_MASK_ALL, \
-}
-
#define INIT_SIGNALS(sig) { \
.count = ATOMIC_INIT(1), \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -120,6 +96,15 @@ extern struct group_info init_groups;
extern struct cred init_cred;
+#ifdef CONFIG_PERF_COUNTERS
+# define INIT_PERF_COUNTERS(tsk) \
+ .perf_counter_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \
+ .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#else
+# define INIT_PERF_COUNTERS(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -147,6 +132,7 @@ extern struct cred init_cred;
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
+ .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
@@ -156,8 +142,8 @@ extern struct cred init_cred;
.group_leader = &tsk, \
.real_cred = &init_cred, \
.cred = &init_cred, \
- .cred_exec_mutex = \
- __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
@@ -182,8 +168,11 @@ extern struct cred init_cred;
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
+ INIT_PERF_COUNTERS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
}
diff --git a/include/linux/input.h b/include/linux/input.h
index 1249a0c20a3..8b3bc3e0d14 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -53,6 +53,7 @@ struct input_absinfo {
__s32 maximum;
__s32 fuzz;
__s32 flat;
+ __s32 resolution;
};
#define EVIOCGVERSION _IOR('E', 0x01, int) /* get driver version */
@@ -106,6 +107,7 @@ struct input_absinfo {
#define SYN_REPORT 0
#define SYN_CONFIG 1
+#define SYN_MT_REPORT 2
/*
* Keys and buttons
@@ -445,6 +447,7 @@ struct input_absinfo {
#define BTN_STYLUS2 0x14c
#define BTN_TOOL_DOUBLETAP 0x14d
#define BTN_TOOL_TRIPLETAP 0x14e
+#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */
#define BTN_WHEEL 0x150
#define BTN_GEAR_DOWN 0x150
@@ -644,6 +647,18 @@ struct input_absinfo {
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
#define ABS_MISC 0x28
+
+#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
+#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
+#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */
+#define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */
+#define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */
+#define ABS_MT_POSITION_X 0x35 /* Center X ellipse position */
+#define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */
+#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */
+#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */
+#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */
+
#define ABS_MAX 0x3f
#define ABS_CNT (ABS_MAX+1)
@@ -661,6 +676,7 @@ struct input_absinfo {
#define SW_DOCK 0x05 /* set = plugged into dock */
#define SW_LINEOUT_INSERT 0x06 /* set = inserted */
#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */
+#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
@@ -742,6 +758,12 @@ struct input_absinfo {
#define BUS_ATARI 0x1B
/*
+ * MT_TOOL types
+ */
+#define MT_TOOL_FINGER 0
+#define MT_TOOL_PEN 1
+
+/*
* Values describing the status of a force-feedback effect
*/
#define FF_STATUS_STOPPED 0x00
@@ -1088,6 +1110,7 @@ struct input_dev {
int absmin[ABS_MAX + 1];
int absfuzz[ABS_MAX + 1];
int absflat[ABS_MAX + 1];
+ int absres[ABS_MAX + 1];
int (*open)(struct input_dev *dev);
void (*close)(struct input_dev *dev);
@@ -1310,6 +1333,11 @@ static inline void input_sync(struct input_dev *dev)
input_event(dev, EV_SYN, SYN_REPORT, 0);
}
+static inline void input_mt_sync(struct input_dev *dev)
+{
+ input_event(dev, EV_SYN, SYN_MT_REPORT, 0);
+}
+
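The ABS_MT_* axes and input_mt_sync() introduced above form the multi-touch reporting sequence: the driver emits one group of ABS_MT_* values per contact, terminates each group with input_mt_sync(), and closes the whole frame with the usual input_sync(). A minimal sketch for a frame with two contacts (the coordinates are placeholders supplied by a hypothetical driver):

/* Report one multi-touch frame containing two contacts (sketch). */
static void example_report_two_contacts(struct input_dev *dev,
					int x1, int y1, int x2, int y2)
{
	input_report_abs(dev, ABS_MT_POSITION_X, x1);
	input_report_abs(dev, ABS_MT_POSITION_Y, y1);
	input_mt_sync(dev);		/* end of the first contact */

	input_report_abs(dev, ABS_MT_POSITION_X, x2);
	input_report_abs(dev, ABS_MT_POSITION_Y, y2);
	input_mt_sync(dev);		/* end of the second contact */

	input_sync(dev);		/* end of the frame */
}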
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code);
static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat)
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d2e3cbfba14..aa8c5317123 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-
+#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
@@ -164,6 +164,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
+#define DMA_GCMD_CFI (((u32) 1) << 23)
/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
@@ -174,6 +175,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
+#define DMA_GSTS_CFIS (((u32)1) << 23)
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
@@ -284,6 +286,14 @@ struct iommu_flush {
unsigned int size_order, u64 type, int non_present_entry_flush);
};
+enum {
+ SR_DMAR_FECTL_REG,
+ SR_DMAR_FEDATA_REG,
+ SR_DMAR_FEADDR_REG,
+ SR_DMAR_FEUADDR_REG,
+ MAX_SR_DMAR_REGS
+};
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 cap;
@@ -292,6 +302,8 @@ struct intel_iommu {
spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
+ unsigned int irq;
+ unsigned char name[13]; /* Device Name */
#ifdef CONFIG_DMAR
unsigned long *domain_ids; /* bitmap of domains */
@@ -299,11 +311,11 @@ struct intel_iommu {
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
- unsigned int irq;
- unsigned char name[7]; /* Device Name */
struct iommu_flush flush;
#endif
struct q_inval *qi; /* Queued invalidation info */
+ u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+
#ifdef CONFIG_INTR_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
#endif
@@ -321,6 +333,8 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void dmar_disable_qi(struct intel_iommu *iommu);
+extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
@@ -331,11 +345,4 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
-extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
-extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
-extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
-extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
-extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
-extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
-
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 9127f6b51a3..2721f07e935 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,8 +59,34 @@
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
+/*
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_DIED - handler thread died
+ * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ */
+enum {
+ IRQTF_RUNTHREAD,
+ IRQTF_DIED,
+ IRQTF_WARNED,
+};
+
typedef irqreturn_t (*irq_handler_t)(int, void *);
+/**
+ * struct irqaction - per interrupt action descriptor
+ * @handler: interrupt handler function
+ * @flags: flags (see IRQF_* above)
+ * @mask: no comment as it is useless and about to be removed
+ * @name: name of the device
+ * @dev_id: cookie to identify the device
+ * @next: pointer to the next irqaction for shared interrupts
+ * @irq: interrupt number
+ * @dir: pointer to the proc/irq/NN/name entry
+ * @thread_fn: interrupt handler function for threaded interrupts
+ * @thread: thread pointer for threaded interrupts
+ * @thread_flags: flags related to @thread
+ */
struct irqaction {
irq_handler_t handler;
unsigned long flags;
@@ -70,18 +96,68 @@ struct irqaction {
struct irqaction *next;
int irq;
struct proc_dir_entry *dir;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned long thread_flags;
};
extern irqreturn_t no_action(int cpl, void *dev_id);
-extern int __must_check request_irq(unsigned int, irq_handler_t handler,
- unsigned long, const char *, void *);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ unsigned long flags, const char *name, void *dev);
+
+static inline int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+}
+
+extern void exit_irq_thread(void);
+#else
+
+extern int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
+
+/*
+ * Special function to avoid ifdeffery in kernel/irq/devres.c which
+ * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
+ * m68k). I really love these $@%#!* obvious Makefile references:
+ * ../../../kernel/irq/devres.o
+ */
+static inline int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ unsigned long flags, const char *name, void *dev)
+{
+ return request_irq(irq, handler, flags, name, dev);
+}
+
+static inline void exit_irq_thread(void) { }
+#endif
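As a usage sketch for the split-handler API above (not part of this patch; the foo_* device helpers are made up): the primary handler only checks and quiets the hardware, then returns IRQ_WAKE_THREAD so that the sleepable work runs in the thread function.

static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        if (!foo_irq_pending(foo))              /* not ours on a shared line */
                return IRQ_NONE;

        foo_mask_irq(foo);                      /* silence the device ... */
        return IRQ_WAKE_THREAD;                 /* ... and wake foo_handle_events() */
}

static irqreturn_t foo_handle_events(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        foo_process_events(foo);                /* may sleep in thread context */
        foo_unmask_irq(foo);
        return IRQ_HANDLED;
}

/* in the probe path: */
err = request_threaded_irq(foo->irq, foo_quick_check, foo_handle_events,
                           IRQF_SHARED, "foo", foo);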
+
extern void free_irq(unsigned int, void *);
struct device;
-extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
- irq_handler_t handler, unsigned long irqflags,
- const char *devname, void *dev_id);
+extern int __must_check
+devm_request_threaded_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_fn,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
+
+static inline int __must_check
+devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+ devname, dev_id);
+}
+
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
/*
@@ -106,6 +182,21 @@ extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+/* The following three functions are for the core kernel use only. */
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void suspend_device_irqs(void);
+extern void resume_device_irqs(void);
+#ifdef CONFIG_PM_SLEEP
+extern int check_wakeup_irqs(void);
+#else
+static inline int check_wakeup_irqs(void) { return 0; }
+#endif
+#else
+static inline void suspend_device_irqs(void) { };
+static inline void resume_device_irqs(void) { };
+static inline int check_wakeup_irqs(void) { return 0; }
+#endif
+
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
extern cpumask_var_t irq_default_affinity;
@@ -258,6 +349,11 @@ enum
NR_SOFTIRQS
};
+/* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+extern char *softirq_to_name[NR_SOFTIRQS];
+
/* softirq mask and active fields moved to irq_cpustat_t in
* asm/hardirq.h to get better cache usage. KAO
*/
@@ -274,6 +370,7 @@ extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+extern void wakeup_softirqd(void);
/* This is the worklist that queues up per-cpu softirq work.
*
@@ -375,6 +472,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
__tasklet_hi_schedule(t);
}
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+ __tasklet_hi_schedule_first(t);
+}
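For contrast with the kmemcheck-only helper above, ordinary users stick to tasklet_hi_schedule(); a minimal sketch (the bar_* names are hypothetical):

static void bar_do_work(unsigned long data)
{
        /* runs in softirq context: must not sleep */
}

static DECLARE_TASKLET(bar_tasklet, bar_do_work, 0);

/* from the interrupt handler: */
tasklet_hi_schedule(&bar_tasklet);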
+
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
@@ -462,12 +573,19 @@ static inline void init_irq_proc(void)
}
#endif
+#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
+extern void debug_poll_all_shared_irqs(void);
+#else
+static inline void debug_poll_all_shared_irqs(void) { }
+#endif
+
int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;
extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern int arch_init_chip_data(struct irq_desc *desc, int node);
#endif
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987bccf8..dd05434fa45 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
* and kmalloc'ed. These could be shared between processes.
*/
struct io_context {
- atomic_t refcount;
+ atomic_long_t refcount;
atomic_t nr_tasks;
/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
* if ref count is zero, don't allow sharing (ioc is going away, it's
* a race).
*/
- if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
- atomic_inc(&ioc->nr_tasks);
+ if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+ atomic_inc(&ioc->nr_tasks);
return ioc;
}
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a7bfb1b6ca..3af4ffd591b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
+#define IOMMU_CACHE (4) /* DMA cache coherency */
struct device;
@@ -28,6 +29,8 @@ struct iommu_domain {
void *priv;
};
+#define IOMMU_CAP_CACHE_COHERENCY 0x1
+
struct iommu_ops {
int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
size_t size);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
unsigned long iova);
+ int (*domain_has_cap)(struct iommu_domain *domain,
+ unsigned long cap);
};
#ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova);
+extern int iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap);
#else /* CONFIG_IOMMU_API */
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
return 0;
}
+static inline int iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ return 0;
+}
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
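An illustrative caller of the new capability check (a sketch which assumes the mapping is built with iommu_map_range(); domain, iova, paddr and size are placeholders): probe for coherency before asking for IOMMU_CACHE.

int prot = IOMMU_READ | IOMMU_WRITE;

if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
        prot |= IOMMU_CACHE;            /* map with coherent DMA semantics */

ret = iommu_map_range(domain, iova, paddr, size, prot);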
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 32e4b2f7229..786e7b8cece 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -49,6 +49,8 @@ struct resource_list {
#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */
#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */
+#define IORESOURCE_MEM_64 0x00100000
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 0f434a28fb5..148265e63e8 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -96,10 +96,10 @@
*/
struct ip_vs_service_user {
/* virtual service addresses */
- u_int16_t protocol;
+ __u16 protocol;
__be32 addr; /* virtual ip address */
__be16 port;
- u_int32_t fwmark; /* firwall mark of service */
+ __u32 fwmark; /* firewall mark of service */
/* virtual service options */
char sched_name[IP_VS_SCHEDNAME_MAXLEN];
@@ -119,8 +119,8 @@ struct ip_vs_dest_user {
int weight; /* destination weight */
/* thresholds for active connections */
- u_int32_t u_threshold; /* upper threshold */
- u_int32_t l_threshold; /* lower threshold */
+ __u32 u_threshold; /* upper threshold */
+ __u32 l_threshold; /* lower threshold */
};
@@ -159,10 +159,10 @@ struct ip_vs_getinfo {
/* The argument to IP_VS_SO_GET_SERVICE */
struct ip_vs_service_entry {
/* which service: user fills in these */
- u_int16_t protocol;
+ __u16 protocol;
__be32 addr; /* virtual address */
__be16 port;
- u_int32_t fwmark; /* firwall mark of service */
+ __u32 fwmark; /* firewall mark of service */
/* service options */
char sched_name[IP_VS_SCHEDNAME_MAXLEN];
@@ -184,12 +184,12 @@ struct ip_vs_dest_entry {
unsigned conn_flags; /* connection flags */
int weight; /* destination weight */
- u_int32_t u_threshold; /* upper threshold */
- u_int32_t l_threshold; /* lower threshold */
+ __u32 u_threshold; /* upper threshold */
+ __u32 l_threshold; /* lower threshold */
- u_int32_t activeconns; /* active connections */
- u_int32_t inactconns; /* inactive connections */
- u_int32_t persistconns; /* persistent connections */
+ __u32 activeconns; /* active connections */
+ __u32 inactconns; /* inactive connections */
+ __u32 persistconns; /* persistent connections */
/* statistics */
struct ip_vs_stats_user stats;
@@ -199,10 +199,10 @@ struct ip_vs_dest_entry {
/* The argument to IP_VS_SO_GET_DESTS */
struct ip_vs_get_dests {
/* which service: user fills in these */
- u_int16_t protocol;
+ __u16 protocol;
__be32 addr; /* virtual address */
__be16 port;
- u_int32_t fwmark; /* firwall mark of service */
+ __u32 fwmark; /* firewall mark of service */
/* number of real servers */
unsigned int num_dests;
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index ea330f9e710..e408722a84c 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -25,7 +25,7 @@ struct ipc_ids {
};
struct ipc_namespace {
- struct kref kref;
+ atomic_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -44,42 +44,66 @@ struct ipc_namespace {
int shm_tot;
struct notifier_block ipcns_nb;
+
+ /* The kern_mount of the mqueuefs sb. We take a ref on it */
+ struct vfsmount *mq_mnt;
+
+ /* # queues in this ns, protected by mq_lock */
+ unsigned int mq_queues_count;
+
+ /* next fields are set through sysctl */
+ unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
+ unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
+ unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
+
};
extern struct ipc_namespace init_ipc_ns;
extern atomic_t nr_ipc_ns;
-#ifdef CONFIG_SYSVIPC
+extern spinlock_t mq_lock;
+#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
#define INIT_IPC_NS(ns) .ns = &init_ipc_ns,
+#else
+#define INIT_IPC_NS(ns)
+#endif
+#ifdef CONFIG_SYSVIPC
extern int register_ipcns_notifier(struct ipc_namespace *);
extern int cond_register_ipcns_notifier(struct ipc_namespace *);
extern void unregister_ipcns_notifier(struct ipc_namespace *);
extern int ipcns_notify(unsigned long);
-
#else /* CONFIG_SYSVIPC */
-#define INIT_IPC_NS(ns)
+static inline int register_ipcns_notifier(struct ipc_namespace *ns)
+{ return 0; }
+static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
+{ return 0; }
+static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
+static inline int ipcns_notify(unsigned long l) { return 0; }
#endif /* CONFIG_SYSVIPC */
-#if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS)
-extern void free_ipc_ns(struct kref *kref);
+#ifdef CONFIG_POSIX_MQUEUE
+extern int mq_init_ns(struct ipc_namespace *ns);
+/* default values */
+#define DFLT_QUEUESMAX 256 /* max number of message queues */
+#define DFLT_MSGMAX 10 /* max number of messages in each queue */
+#define HARD_MSGMAX (131072/sizeof(void *))
+#define DFLT_MSGSIZEMAX 8192 /* max message size */
+#else
+static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
+#endif
+
+#if defined(CONFIG_IPC_NS)
extern struct ipc_namespace *copy_ipcs(unsigned long flags,
struct ipc_namespace *ns);
-extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
- void (*free)(struct ipc_namespace *,
- struct kern_ipc_perm *));
-
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- kref_get(&ns->kref);
+ atomic_inc(&ns->count);
return ns;
}
-static inline void put_ipc_ns(struct ipc_namespace *ns)
-{
- kref_put(&ns->kref, free_ipc_ns);
-}
+extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
struct ipc_namespace *ns)
@@ -99,4 +123,18 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
#endif
+
+#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
+
+struct ctl_table_header;
+extern struct ctl_table_header *mq_register_sysctl_table(void);
+
+#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+static inline struct ctl_table_header *mq_register_sysctl_table(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
#endif
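Callers keep the same pairing as with the old kref; only the final free moves behind the out-of-line put_ipc_ns(). A minimal usage sketch (tsk is a placeholder task pointer):

struct ipc_namespace *ns = get_ipc_ns(tsk->nsproxy->ipc_ns);

/* ... read ns->mq_queues_max, ns->ids[], etc. ... */

put_ipc_ns(ns);         /* drops ns->count; the namespace is freed at zero */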
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 7ebdb4fb4e5..65aae34759d 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -198,6 +198,8 @@ struct kernel_ipmi_msg {
response. When you send a
response message, this will
be returned. */
+#define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */
+
/* Note that async events and received commands do not have a completion
code as the first byte of the incoming data, unlike a response. */
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index b56a158d587..df97e6e31e8 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -58,6 +58,12 @@
#define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35
#define IPMI_GET_CHANNEL_INFO_CMD 0x42
+/* Bit for BMC global enables. */
+#define IPMI_BMC_RCV_MSG_INTR 0x01
+#define IPMI_BMC_EVT_MSG_INTR 0x02
+#define IPMI_BMC_EVT_MSG_BUFF 0x04
+#define IPMI_BMC_SYS_LOG 0x08
+
#define IPMI_NETFN_STORAGE_REQUEST 0x0a
#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
#define IPMI_ADD_SEL_ENTRY_CMD 0x44
@@ -109,5 +115,7 @@
#define IPMI_CHANNEL_MEDIUM_USB1 10
#define IPMI_CHANNEL_MEDIUM_USB2 11
#define IPMI_CHANNEL_MEDIUM_SYSINTF 12
+#define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60
+#define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f
#endif /* __LINUX_IPMI_MSGDEFS_H */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 62b73668b60..f7c9c75a277 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -230,6 +230,6 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
automatically be destroyed when the interface is destroyed. */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
read_proc_t *read_proc,
- void *data, struct module *owner);
+ void *data);
#endif /* __LINUX_IPMI_SMI_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 476d9464ac8..c662efa6828 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -169,6 +169,12 @@ struct ipv6_devconf {
__s32 accept_dad;
void *sysctl;
};
+
+struct ipv6_params {
+ __s32 disable_ipv6;
+ __s32 autoconf;
+};
+extern struct ipv6_params ipv6_defaults;
#endif
/* index values for the variables in ipv6_devconf */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f899b502f18..cb2e77a3f7f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -17,9 +17,12 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
+#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
+#include <linux/topology.h>
+#include <linux/wait.h>
#include <asm/irq.h>
#include <asm/ptrace.h>
@@ -65,6 +68,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
+#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -113,7 +117,7 @@ struct irq_chip {
void (*eoi)(unsigned int irq);
void (*end)(unsigned int irq);
- void (*set_affinity)(unsigned int irq,
+ int (*set_affinity)(unsigned int irq,
const struct cpumask *dest);
int (*retrigger)(unsigned int irq);
int (*set_type)(unsigned int irq, unsigned int flow_type);
@@ -153,19 +157,19 @@ struct irq_2_iommu;
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
- * @cpu: cpu index useful for balancing
+ * @node: node index useful for balancing
* @pending_mask: pending rebalanced interrupts
+ * @threads_active: number of irqaction threads currently running
+ * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
unsigned int irq;
-#ifdef CONFIG_SPARSE_IRQ
struct timer_rand_state *timer_rand_state;
unsigned int *kstat_irqs;
-# ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_INTR_REMAP
struct irq_2_iommu *irq_2_iommu;
-# endif
#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
@@ -182,12 +186,14 @@ struct irq_desc {
unsigned int irqs_unhandled;
spinlock_t lock;
#ifdef CONFIG_SMP
- cpumask_t affinity;
- unsigned int cpu;
-#endif
+ cpumask_var_t affinity;
+ unsigned int node;
#ifdef CONFIG_GENERIC_PENDING_IRQ
- cpumask_t pending_mask;
+ cpumask_var_t pending_mask;
+#endif
#endif
+ atomic_t threads_active;
+ wait_queue_head_t wait_for_threads;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
@@ -195,38 +201,28 @@ struct irq_desc {
} ____cacheline_internodealigned_in_smp;
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
- struct irq_desc *desc, int cpu);
+ struct irq_desc *desc, int node);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
-#else /* CONFIG_SPARSE_IRQ */
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
-
-#define kstat_irqs_this_cpu(DESC) \
- ((DESC)->kstat_irqs[smp_processor_id()])
-#define kstat_incr_irqs_this_cpu(irqno, DESC) \
- ((DESC)->kstat_irqs[smp_processor_id()]++)
-
-#endif /* CONFIG_SPARSE_IRQ */
-
-extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+#endif
-static inline struct irq_desc *
-irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
- return irq_to_desc(irq);
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
#else
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
return desc;
-#endif
}
+#endif
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
/*
* Migration helpers for obsolete names, they will go away:
*/
#define hw_interrupt_type irq_chip
-typedef struct irq_chip hw_irq_controller;
#define no_irq_type no_irq_chip
typedef struct irq_desc irq_desc_t;
@@ -236,6 +232,7 @@ typedef struct irq_desc irq_desc_t;
#include <asm/hw_irq.h>
extern int setup_irq(unsigned int irq, struct irqaction *new);
+extern void remove_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_GENERIC_HARDIRQS
@@ -280,7 +277,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
}
/* Handle irq action chains: */
-extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
+extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
/*
* Built-in IRQ handlers for various IRQ types,
@@ -325,7 +322,7 @@ static inline void generic_handle_irq(unsigned int irq)
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- int action_ret);
+ irqreturn_t action_ret);
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
@@ -386,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
/* Handle dynamic irq creation and destruction */
-extern unsigned int create_irq_nr(unsigned int irq_want);
+extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
@@ -422,4 +419,99 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
#endif /* !CONFIG_S390 */
+#ifdef CONFIG_SMP
+/**
+ * alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc: pointer to irq_desc struct
+ * @node: node which will be handling the cpumasks
+ * @boot: true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ */
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+ bool boot)
+{
+ gfp_t gfp = GFP_ATOMIC;
+
+ if (boot)
+ gfp = GFP_NOWAIT;
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
+ return false;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+ free_cpumask_var(desc->affinity);
+ return false;
+ }
+#endif
+#endif
+ return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+ cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_clear(desc->pending_mask);
+#endif
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc: pointer to old irq_desc struct
+ * @new_desc: pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+ free_cpumask_var(old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ free_cpumask_var(old_desc->pending_mask);
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+ bool boot)
+{
+ return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+}
+#endif /* CONFIG_SMP */
+
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 74bde13224c..b02a3f1d46a 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -24,8 +24,8 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define trace_softirq_enter() do { current->softirq_context++; } while (0)
-# define trace_softirq_exit() do { current->softirq_context--; } while (0)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
@@ -38,8 +38,8 @@
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
-# define trace_softirq_enter() do { } while (0)
-# define trace_softirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
# define INIT_TRACE_IRQFLAGS
#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 86af92e9e84..ec87b212ff7 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -20,6 +20,7 @@
# define for_each_irq_desc_reverse(irq, desc) \
for (irq = nr_irqs - 1; irq >= 0; irq--)
+
#else /* CONFIG_GENERIC_HARDIRQS */
extern int nr_irqs;
@@ -28,13 +29,17 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
# define for_each_irq_desc(irq, desc) \
for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
irq++, desc = irq_to_desc(irq)) \
- if (desc)
+ if (!desc) \
+ ; \
+ else
# define for_each_irq_desc_reverse(irq, desc) \
for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
irq--, desc = irq_to_desc(irq)) \
- if (desc)
+ if (!desc) \
+ ; \
+ else
#endif /* CONFIG_GENERIC_HARDIRQS */
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 881883c2009..819acaaac3f 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -1,25 +1,19 @@
-/* irqreturn.h */
#ifndef _LINUX_IRQRETURN_H
#define _LINUX_IRQRETURN_H
-/*
- * For 2.4.x compatibility, 2.4.x can use
- *
- * typedef void irqreturn_t;
- * #define IRQ_NONE
- * #define IRQ_HANDLED
- * #define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
- *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
+/**
+ * enum irqreturn
+ * @IRQ_NONE interrupt was not from this device
+ * @IRQ_HANDLED interrupt was handled by this device
+ * @IRQ_WAKE_THREAD handler requests to wake the handler thread
*/
-typedef int irqreturn_t;
+enum irqreturn {
+ IRQ_NONE,
+ IRQ_HANDLED,
+ IRQ_WAKE_THREAD,
+};
-#define IRQ_NONE (0)
-#define IRQ_HANDLED (1)
-#define IRQ_RETVAL(x) ((x) != 0)
+typedef enum irqreturn irqreturn_t;
+#define IRQ_RETVAL(x) ((x) != IRQ_NONE)
#endif
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index 35e9b0fd014..7acb87a4487 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -79,7 +79,7 @@ int attach_capi_ctr(struct capi_ctr *);
int detach_capi_ctr(struct capi_ctr *);
void capi_ctr_ready(struct capi_ctr * card);
-void capi_ctr_reseted(struct capi_ctr * card);
+void capi_ctr_down(struct capi_ctr * card);
void capi_ctr_suspend_output(struct capi_ctr * card);
void capi_ctr_resume_output(struct capi_ctr * card);
void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h
index f2720280b9e..062d20f7432 100644
--- a/include/linux/ivtv.h
+++ b/include/linux/ivtv.h
@@ -60,10 +60,10 @@ struct ivtv_dma_frame {
#define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame)
-/* These are the VBI types as they appear in the embedded VBI private packets. */
-#define IVTV_SLICED_TYPE_TELETEXT_B (1)
-#define IVTV_SLICED_TYPE_CAPTION_525 (4)
-#define IVTV_SLICED_TYPE_WSS_625 (5)
-#define IVTV_SLICED_TYPE_VPS (7)
+/* Deprecated defines: applications should use the defines from videodev2.h */
+#define IVTV_SLICED_TYPE_TELETEXT_B V4L2_MPEG_VBI_IVTV_TELETEXT_B
+#define IVTV_SLICED_TYPE_CAPTION_525 V4L2_MPEG_VBI_IVTV_CAPTION_525
+#define IVTV_SLICED_TYPE_WSS_625 V4L2_MPEG_VBI_IVTV_WSS_625
+#define IVTV_SLICED_TYPE_VPS V4L2_MPEG_VBI_IVTV_VPS
#endif /* _LINUX_IVTV_H */
diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h
index e20af47b59a..9d88b29ddf5 100644
--- a/include/linux/ivtvfb.h
+++ b/include/linux/ivtvfb.h
@@ -33,6 +33,6 @@ struct ivtvfb_dma_frame {
};
#define IVTVFB_IOC_DMA_FRAME _IOW('V', BASE_VIDIOC_PRIVATE+0, struct ivtvfb_dma_frame)
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t)
+#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 64246dce566..c2049a04fa0 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -35,7 +35,7 @@
#define journal_oom_retry 1
/*
- * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds
+ * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
* certain classes of error which can occur due to failed IOs. Under
* normal use we want ext3 to continue after such errors, because
* hardware _can_ fail, but for debugging purposes when running tests on
@@ -552,6 +552,11 @@ struct transaction_s
*/
int t_handle_count;
+ /*
+ * This transaction is being forced and some process is
+ * waiting for it to finish.
+ */
+ int t_synchronous_commit:1;
};
/**
@@ -973,7 +978,8 @@ extern void journal_destroy_revoke(journal_t *);
extern int journal_revoke (handle_t *,
unsigned long, struct buffer_head *);
extern int journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void journal_write_revoke_records(journal_t *, transaction_t *);
+extern void journal_write_revoke_records(journal_t *,
+ transaction_t *, int);
/* Recovery revoke support */
extern int journal_set_revoke(journal_t *, unsigned long, tid_t);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4d248b3f132..d97eb652d6c 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -649,6 +649,12 @@ struct transaction_s
int t_handle_count;
/*
+ * This transaction is being forced and some process is
+ * waiting for it to finish.
+ */
+ int t_synchronous_commit:1;
+
+ /*
* For use by the filesystem to store fs-specific data
* structures associated with the transaction
*/
@@ -1187,7 +1193,8 @@ extern int jbd2_journal_init_revoke_caches(void);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
+extern void jbd2_journal_write_revoke_records(journal_t *,
+ transaction_t *, int);
/* Recovery revoke support */
extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
@@ -1308,6 +1315,12 @@ extern int jbd_blocks_per_page(struct inode *inode);
#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
#define JBUFFER_TRACE(jh, info) do {} while (0)
+/*
+ * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
+ * tracing infrastructure to map a dev_t to a device name.
+ */
+extern const char *jbd2_dev_to_name(dev_t device);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_JBD2_H */
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index da720bc3eb1..2b32d638147 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -12,6 +12,7 @@
#ifndef __LINUX_JFFS2_H__
#define __LINUX_JFFS2_H__
+#include <linux/types.h>
#include <linux/magic.h>
/* You must include something which defines the C99 uintXX_t types.
@@ -91,15 +92,15 @@
byteswapping */
typedef struct {
- uint32_t v32;
+ __u32 v32;
} __attribute__((packed)) jint32_t;
typedef struct {
- uint32_t m;
+ __u32 m;
} __attribute__((packed)) jmode_t;
typedef struct {
- uint16_t v16;
+ __u16 v16;
} __attribute__((packed)) jint16_t;
struct jffs2_unknown_node
@@ -121,12 +122,12 @@ struct jffs2_raw_dirent
jint32_t version;
jint32_t ino; /* == zero for unlink */
jint32_t mctime;
- uint8_t nsize;
- uint8_t type;
- uint8_t unused[2];
+ __u8 nsize;
+ __u8 type;
+ __u8 unused[2];
jint32_t node_crc;
jint32_t name_crc;
- uint8_t name[0];
+ __u8 name[0];
};
/* The JFFS2 raw inode structure: Used for storage on physical media. */
@@ -153,12 +154,12 @@ struct jffs2_raw_inode
jint32_t offset; /* Where to begin to write. */
jint32_t csize; /* (Compressed) data size */
jint32_t dsize; /* Size of the node's data. (after decompression) */
- uint8_t compr; /* Compression algorithm used */
- uint8_t usercompr; /* Compression algorithm requested by the user */
+ __u8 compr; /* Compression algorithm used */
+ __u8 usercompr; /* Compression algorithm requested by the user */
jint16_t flags; /* See JFFS2_INO_FLAG_* */
jint32_t data_crc; /* CRC for the (compressed) data. */
jint32_t node_crc; /* CRC for the raw inode (excluding data) */
- uint8_t data[0];
+ __u8 data[0];
};
struct jffs2_raw_xattr {
@@ -168,12 +169,12 @@ struct jffs2_raw_xattr {
jint32_t hdr_crc;
jint32_t xid; /* XATTR identifier number */
jint32_t version;
- uint8_t xprefix;
- uint8_t name_len;
+ __u8 xprefix;
+ __u8 name_len;
jint16_t value_len;
jint32_t data_crc;
jint32_t node_crc;
- uint8_t data[0];
+ __u8 data[0];
} __attribute__((packed));
struct jffs2_raw_xref
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index f3fe34391d8..792274269f2 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -13,10 +13,17 @@
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+struct module;
+
#ifdef CONFIG_KALLSYMS
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
+/* Call a function on each kallsyms symbol in the core kernel */
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long),
+ void *data);
+
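A sketch of driving the new iterator (the callback and struct names are made up; a non-zero return value stops the walk and is propagated back to the caller):

struct sym_match {
        const char *name;
        unsigned long addr;
};

static int match_symbol(void *data, const char *name,
                        struct module *mod, unsigned long addr)
{
        struct sym_match *m = data;

        if (strcmp(name, m->name) == 0) {
                m->addr = addr;
                return 1;               /* stop iterating */
        }
        return 0;                       /* keep going */
}

/* usage: */
struct sym_match m = { .name = "do_fork" };
kallsyms_on_each_symbol(match_symbol, &m);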
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *,
+ unsigned long),
+ void *data)
+{
+ return 0;
+}
+
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7fa371898e3..fac104e7186 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -16,7 +16,7 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/ratelimit.h>
-#include <linux/dynamic_printk.h>
+#include <linux/dynamic_debug.h>
#include <asm/byteorder.h>
#include <asm/bug.h>
@@ -58,7 +58,7 @@ extern const char linux_proc_banner[];
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-#ifdef CONFIG_LBD
+#ifdef CONFIG_LBDAF
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
@@ -97,12 +97,14 @@ extern const char linux_proc_banner[];
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
+/* Use the default kernel loglevel */
+#define KERN_DEFAULT "<d>"
/*
* Annotation for a "continued" line of log printout (only done after a
* line that had no enclosing \n). Only to be used by core/arch code
* during early bootup (a continued line is not SMP-safe otherwise).
*/
-#define KERN_CONT ""
+#define KERN_CONT "<c>"
extern int console_printk[];
@@ -242,6 +244,20 @@ extern struct ratelimit_state printk_ratelimit_state;
extern int printk_ratelimit(void);
extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+#define printk_once(x...) ({ \
+ static int __print_once = 1; \
+ \
+ if (__print_once) { \
+ __print_once = 0; \
+ printk(x); \
+ } \
+})
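Usage is the same as printk(); for example (illustrative message only):

printk_once(KERN_WARNING "foo: deprecated ioctl used by %s\n", current->comm);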
+
+void log_buf_kexec_setup(void);
#else
static inline int vprintk(const char *s, va_list args)
__attribute__ ((format (printf, 1, 0)));
@@ -253,6 +269,13 @@ static inline int printk_ratelimit(void) { return 0; }
static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
unsigned int interval_msec) \
{ return false; }
+
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_once(x...) printk(x)
+
+static inline void log_buf_kexec_setup(void)
+{
+}
#endif
extern int printk_needs_cpu(int cpu);
@@ -353,14 +376,26 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+
+/* pr_devel() should produce zero code unless DEBUG is defined */
+#ifdef DEBUG
+#define pr_devel(fmt, ...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_devel(fmt, ...) \
+ ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+#endif
/* If you are writing a driver, please use dev_dbg instead */
#if defined(DEBUG)
#define pr_debug(fmt, ...) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
-#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
#define pr_debug(fmt, ...) do { \
- dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#else
#define pr_debug(fmt, ...) \
@@ -368,6 +403,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
#endif
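An illustrative use of the prefix convention these macros rely on (the "foodrv" name is made up): define pr_fmt() before the includes and every pr_*() call in the file picks up the prefix, with pr_debug() compiled out or routed through dynamic debug as configured.

#define pr_fmt(fmt) "foodrv: " fmt      /* before any #include */

#include <linux/kernel.h>

static void foodrv_report(int id, unsigned int val)
{
        pr_info("probing device %d\n", id);     /* prints "foodrv: probing device 0" */
        pr_debug("wrote %#x to CTRL\n", val);   /* compiled out or dynamic-debug gated */
        pr_devel("developer-only detail\n");    /* emitted only when DEBUG is defined */
}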
/*
+ * General tracing related utility functions - trace_printk(),
+ * tracing_on/tracing_off and tracing_start()/tracing_stop
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop/tracing_start has slightly more overhead. It is used
+ * by things like suspend to ram where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
+#ifdef CONFIG_RING_BUFFER
+void tracing_on(void);
+void tracing_off(void);
+/* trace_off_permanent stops recording with no way to bring it back */
+void tracing_off_permanent(void);
+int tracing_is_on(void);
+#else
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline void tracing_off_permanent(void) { }
+static inline int tracing_is_on(void) { return 0; }
+#endif
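A typical pattern for the on/off pair described above (a sketch; the status check is hypothetical): stop recording the moment a suspected bug is hit so the events leading up to it survive for inspection via /sys/kernel/debug/tracing.

/* somewhere in a hypothetical error path: */
if (unlikely(status & FOO_HW_ERROR)) {          /* FOO_HW_ERROR is made up */
        tracing_off();                          /* freeze the ring buffer */
        pr_err("foo: hardware error, trace stopped\n");
}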
+#ifdef CONFIG_TRACING
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+static inline void __attribute__ ((format (printf, 1, 2)))
+____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...) \
+do { \
+ if (0) \
+ ____trace_printk_check_format(fmt, ##args); \
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ * the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code.
+ */
+
+#define trace_printk(fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(_THIS_IP_, fmt, ##args); \
+} while (0)
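For example, in a hot path being debugged (the variable names are illustrative):

trace_printk("queue %d: head=%u tail=%u in_flight=%u\n",
             qid, head, tail, in_flight);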
+
+extern int
+__trace_bprintk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to initialize the static variable with fmt when it is not a
+ * constant, even with the outer if statement.
+ */
+#define ftrace_vprintk(fmt, vargs) \
+do { \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
+ } else \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+} while (0)
+
+extern int
+__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(void);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+trace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif /* CONFIG_TRACING */
+
+/*
* Display an IP address in readable format.
*/
@@ -378,18 +546,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
((unsigned char *)&addr)[3]
#define NIPQUAD_FMT "%u.%u.%u.%u"
-#if defined(__LITTLE_ENDIAN)
-#define HIPQUAD(addr) \
- ((unsigned char *)&addr)[3], \
- ((unsigned char *)&addr)[2], \
- ((unsigned char *)&addr)[1], \
- ((unsigned char *)&addr)[0]
-#elif defined(__BIG_ENDIAN)
-#define HIPQUAD NIPQUAD
-#else
-#error "Please fix asm/byteorder.h"
-#endif /* __LITTLE_ENDIAN */
-
/*
* min()/max()/clamp() macros that also do
* strict type-checking.. See the
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 570d2041311..348fa8874b5 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -5,6 +5,7 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
+#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>
@@ -28,9 +29,10 @@ struct cpu_usage_stat {
struct kernel_stat {
struct cpu_usage_stat cpustat;
-#ifndef CONFIG_SPARSE_IRQ
+#ifndef CONFIG_GENERIC_HARDIRQS
unsigned int irqs[NR_IRQS];
#endif
+ unsigned int softirqs[NR_SOFTIRQS];
};
DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -41,7 +43,7 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
extern unsigned long long nr_context_switches(void);
-#ifndef CONFIG_SPARSE_IRQ
+#ifndef CONFIG_GENERIC_HARDIRQS
#define kstat_irqs_this_cpu(irq) \
(kstat_this_cpu.irqs[irq])
@@ -52,18 +54,31 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
{
kstat_this_cpu.irqs[irq]++;
}
-#endif
-
-#ifndef CONFIG_SPARSE_IRQ
static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
return kstat_cpu(cpu).irqs[irq];
}
#else
+#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#define kstat_irqs_this_cpu(DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()]++)
+
#endif
+static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
+{
+ kstat_this_cpu.softirqs[irq]++;
+}
+
+static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
+{
+ return kstat_cpu(cpu).softirqs[irq];
+}
+
/*
* Number of interrupts per specific IRQ source, since bootup
*/
@@ -78,7 +93,12 @@ static inline unsigned int kstat_irqs(unsigned int irq)
return sum;
}
+
+/*
+ * Lock/unlock the current runqueue - to extract task statistics:
+ */
extern unsigned long long task_delta_exec(struct task_struct *);
+
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
diff --git a/include/linux/key.h b/include/linux/key.h
index 21d32a142c0..e544f466d69 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -20,6 +20,7 @@
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/sysctl.h>
+#include <linux/rwsem.h>
#include <asm/atomic.h>
#ifdef __KERNEL__
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index a3c984d780f..33a63f62d57 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -56,6 +56,7 @@ extern int unregister_keyboard_notifier(struct notifier_block *nb);
#define KT_ASCII 9
#define KT_LOCK 10
#define KT_SLOCK 12
+#define KT_DEAD2 13
#define KT_BRL 14
#define K(t,v) (((t)<<8)|(v))
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 00000000000..47b39b7c7e8
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,153 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+ size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+ gfp_t gfpflags);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+
+#else
+#define kmemcheck_enabled 0
+
+static inline void
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+ size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+ size_t size)
+{
+}
+
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+ unsigned int order, gfp_t gfpflags)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+ return false;
+}
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+#endif /* CONFIG_KMEMCHECK */
+
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ * struct a {
+ * int x:8, y:8;
+ * };
+ *
+ * then this should be rewritten as
+ *
+ * struct a {
+ * kmemcheck_bitfield_begin(flags);
+ * int x:8, y:8;
+ * kmemcheck_bitfield_end(flags);
+ * };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ * kmemcheck_annotate_bitfield(a, flags);
+ *
+ * Note: We provide the same definitions for both kmemcheck and non-
+ * kmemcheck kernels. This makes it harder to introduce accidental errors. It
+ * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
+ */
+#define kmemcheck_bitfield_begin(name) \
+ int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+ int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do if (ptr) { \
+ int _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ kmemcheck_mark_initialized(&(var), sizeof(var)); \
+ } while (0) \
+
+#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
new file mode 100644
index 00000000000..7796aed6cdd
--- /dev/null
+++ b/include/linux/kmemleak.h
@@ -0,0 +1,96 @@
+/*
+ * include/linux/kmemleak.h
+ *
+ * Copyright (C) 2008 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __KMEMLEAK_H
+#define __KMEMLEAK_H
+
+#ifdef CONFIG_DEBUG_KMEMLEAK
+
+extern void kmemleak_init(void);
+extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp);
+extern void kmemleak_free(const void *ptr);
+extern void kmemleak_padding(const void *ptr, unsigned long offset,
+ size_t size);
+extern void kmemleak_not_leak(const void *ptr);
+extern void kmemleak_ignore(const void *ptr);
+extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
+ size_t length, gfp_t gfp);
+extern void kmemleak_no_scan(const void *ptr);
+
+static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+ if (!(flags & SLAB_NOLEAKTRACE))
+ kmemleak_alloc(ptr, size, min_count, gfp);
+}
+
+static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+ if (!(flags & SLAB_NOLEAKTRACE))
+ kmemleak_free(ptr);
+}
+
+static inline void kmemleak_erase(void **ptr)
+{
+ *ptr = NULL;
+}
+
+#else
+
+static inline void kmemleak_init(void)
+{
+}
+static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_free(const void *ptr)
+{
+}
+static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+}
+static inline void kmemleak_not_leak(const void *ptr)
+{
+}
+static inline void kmemleak_ignore(const void *ptr)
+{
+}
+static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
+ size_t length, gfp_t gfp)
+{
+}
+static inline void kmemleak_erase(void **ptr)
+{
+}
+static inline void kmemleak_no_scan(const void *ptr)
+{
+}
+
+#endif /* CONFIG_DEBUG_KMEMLEAK */
+
+#endif /* __KMEMLEAK_H */
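A hedged usage sketch (the FOO_* names and register layout are made up): an object whose only reference is a physical address programmed into hardware is invisible to the scanner and should be excluded explicitly.

buf = kmalloc(FOO_RING_SIZE, GFP_KERNEL);
if (!buf)
        return -ENOMEM;

kmemleak_not_leak(buf);         /* no scannable pointer will reference buf */
writel(virt_to_phys(buf), base + FOO_RING_BASE_REG);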
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
new file mode 100644
index 00000000000..b616d3930c3
--- /dev/null
+++ b/include/linux/kmemtrace.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <trace/events/kmem.h>
+
+#ifdef CONFIG_KMEMTRACE
+extern void kmemtrace_init(void);
+#else
+static inline void kmemtrace_init(void)
+{
+}
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
+
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 92213a9194e..384ca8bbf1a 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -29,10 +29,15 @@
#ifdef CONFIG_MODULES
/* modprobe exit status on success, -ve on error. Return value
* usually useless though. */
-extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2)));
-#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
+extern int __request_module(bool wait, const char *name, ...) \
+ __attribute__((format(printf, 2, 3)));
+#define request_module(mod...) __request_module(true, mod)
+#define request_module_nowait(mod...) __request_module(false, mod)
+#define try_then_request_module(x, mod...) \
+ ((x) ?: (__request_module(true, mod), (x)))
#else
-static inline int request_module(const char * name, ...) { return -ENOSYS; }
+static inline int request_module(const char *name, ...) { return -ENOSYS; }
+static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; }
#define try_then_request_module(x, mod...) (x)
#endif
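Illustrative calls (the protocol number and module names are made up): the plain form waits for modprobe to finish, while the _nowait form only kicks it off, which is what callers that must not block need.

request_module("foo-proto-%d", protocol);       /* blocks until modprobe exits */
request_module_nowait("foo-fw-helper");         /* fire and forget */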
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 5437ac0276e..58ae8e00fcd 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -68,10 +68,13 @@ struct kobject {
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
+ unsigned int uevent_suppress:1;
};
extern int kobject_set_name(struct kobject *kobj, const char *name, ...)
__attribute__((format(printf, 2, 3)));
+extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 32851eef48f..bcd9c07848b 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -94,12 +94,16 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /* ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it. */
+ /*
+ * ... called if executing addr causes a fault (eg. page fault).
+ * Return 1 if it handled fault, otherwise kernel will see it.
+ */
kprobe_fault_handler_t fault_handler;
- /* ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it. */
+ /*
+ * ... called if breakpoint trap occurs in probe handler.
+ * Return 1 if it handled break, otherwise kernel will see it.
+ */
kprobe_break_handler_t break_handler;
/* Saved opcode (which has been replaced with breakpoint) */
@@ -108,18 +112,28 @@ struct kprobe {
/* copy of the original instruction */
struct arch_specific_insn ainsn;
- /* Indicates various status flags. Protected by kprobe_mutex. */
+ /*
+ * Indicates various status flags.
+ * Protected by kprobe_mutex after this kprobe is registered.
+ */
u32 flags;
};
/* Kprobe status flags */
#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
+#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
+/* Has this kprobe gone ? */
static inline int kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
+/* Is this kprobe disabled ? */
+static inline int kprobe_disabled(struct kprobe *p)
+{
+ return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
+}
/*
* Special probe type that uses setjmp-longjmp type tricks to resume
* execution at a specified entry with a matching prototype corresponding
@@ -182,6 +196,14 @@ struct kprobe_blackpoint {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+/*
+ * For #ifdef avoidance:
+ */
+static inline int kprobes_built_in(void)
+{
+ return 1;
+}
+
#ifdef CONFIG_KRETPROBES
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs);
@@ -271,8 +293,19 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
-#else /* CONFIG_KPROBES */
+int disable_kprobe(struct kprobe *kp);
+int enable_kprobe(struct kprobe *kp);
+#else /* !CONFIG_KPROBES: */
+
+static inline int kprobes_built_in(void)
+{
+ return 0;
+}
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ return 0;
+}
static inline struct kprobe *get_kprobe(void *addr)
{
return NULL;
@@ -329,5 +362,30 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
-#endif /* CONFIG_KPROBES */
-#endif /* _LINUX_KPROBES_H */
+static inline int disable_kprobe(struct kprobe *kp)
+{
+ return -ENOSYS;
+}
+static inline int enable_kprobe(struct kprobe *kp)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_KPROBES */
+static inline int disable_kretprobe(struct kretprobe *rp)
+{
+ return disable_kprobe(&rp->kp);
+}
+static inline int enable_kretprobe(struct kretprobe *rp)
+{
+ return enable_kprobe(&rp->kp);
+}
+static inline int disable_jprobe(struct jprobe *jp)
+{
+ return disable_kprobe(&jp->kp);
+}
+static inline int enable_jprobe(struct jprobe *jp)
+{
+ return enable_kprobe(&jp->kp);
+}
+
+#endif /* _LINUX_KPROBES_H */
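Note on the kprobes change above: the new enable_kprobe()/disable_kprobe() pair lets a probe stay registered while its breakpoint is disarmed. A minimal module-style sketch (the probed symbol and handler body are assumptions, not part of the patch):

#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* just observe; let execution continue normally */
}

static struct kprobe example_kp = {
	.symbol_name = "do_fork",	/* assumed probe target */
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	int ret = register_kprobe(&example_kp);

	if (ret < 0)
		return ret;
	/* Keep the probe registered but dormant until it is wanted. */
	return disable_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");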
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 0424326f167..3db5d8d3748 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -48,7 +48,10 @@ struct kvm_irq_level {
* For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
* For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
*/
- __u32 irq;
+ union {
+ __u32 irq;
+ __s32 status;
+ };
__u32 level;
};
@@ -116,7 +119,7 @@ struct kvm_run {
__u32 error_code;
} ex;
/* KVM_EXIT_IO */
- struct kvm_io {
+ struct {
#define KVM_EXIT_IO_IN 0
#define KVM_EXIT_IO_OUT 1
__u8 direction;
@@ -126,6 +129,7 @@ struct kvm_run {
__u64 data_offset; /* relative to kvm_run start */
} io;
struct {
+ struct kvm_debug_exit_arch arch;
} debug;
/* KVM_EXIT_MMIO */
struct {
@@ -217,28 +221,13 @@ struct kvm_interrupt {
__u32 irq;
};
-struct kvm_breakpoint {
- __u32 enabled;
- __u32 padding;
- __u64 address;
-};
-
-/* for KVM_DEBUG_GUEST */
-struct kvm_debug_guest {
- /* int */
- __u32 enabled;
- __u32 pad;
- struct kvm_breakpoint breakpoints[4];
- __u32 singlestep;
-};
-
/* for KVM_GET_DIRTY_LOG */
struct kvm_dirty_log {
__u32 slot;
- __u32 padding;
+ __u32 padding1;
union {
void __user *dirty_bitmap; /* one bit per page */
- __u64 padding;
+ __u64 padding2;
};
};
@@ -292,6 +281,17 @@ struct kvm_s390_interrupt {
__u64 parm64;
};
+/* for KVM_SET_GUEST_DEBUG */
+
+#define KVM_GUESTDBG_ENABLE 0x00000001
+#define KVM_GUESTDBG_SINGLESTEP 0x00000002
+
+struct kvm_guest_debug {
+ __u32 control;
+ __u32 pad;
+ struct kvm_guest_debug_arch arch;
+};
+
#define KVM_TRC_SHIFT 16
/*
* kvm trace categories
@@ -396,6 +396,63 @@ struct kvm_trace_rec {
#ifdef __KVM_HAVE_USER_NMI
#define KVM_CAP_USER_NMI 22
#endif
+#ifdef __KVM_HAVE_GUEST_DEBUG
+#define KVM_CAP_SET_GUEST_DEBUG 23
+#endif
+#ifdef __KVM_HAVE_PIT
+#define KVM_CAP_REINJECT_CONTROL 24
+#endif
+#ifdef __KVM_HAVE_IOAPIC
+#define KVM_CAP_IRQ_ROUTING 25
+#endif
+#define KVM_CAP_IRQ_INJECT_STATUS 26
+#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+#define KVM_CAP_DEVICE_DEASSIGNMENT 27
+#endif
+#ifdef __KVM_HAVE_MSIX
+#define KVM_CAP_DEVICE_MSIX 28
+#endif
+#define KVM_CAP_ASSIGN_DEV_IRQ 29
+/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
+#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
+
+#ifdef KVM_CAP_IRQ_ROUTING
+
+struct kvm_irq_routing_irqchip {
+ __u32 irqchip;
+ __u32 pin;
+};
+
+struct kvm_irq_routing_msi {
+ __u32 address_lo;
+ __u32 address_hi;
+ __u32 data;
+ __u32 pad;
+};
+
+/* gsi routing entry types */
+#define KVM_IRQ_ROUTING_IRQCHIP 1
+#define KVM_IRQ_ROUTING_MSI 2
+
+struct kvm_irq_routing_entry {
+ __u32 gsi;
+ __u32 type;
+ __u32 flags;
+ __u32 pad;
+ union {
+ struct kvm_irq_routing_irqchip irqchip;
+ struct kvm_irq_routing_msi msi;
+ __u32 pad[8];
+ } u;
+};
+
+struct kvm_irq_routing {
+ __u32 nr;
+ __u32 flags;
+ struct kvm_irq_routing_entry entries[0];
+};
+
+#endif
/*
* ioctls for VM fds
@@ -421,14 +478,26 @@ struct kvm_trace_rec {
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
+#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
#define KVM_REGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
struct kvm_assigned_pci_dev)
+#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
+/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
#define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
struct kvm_assigned_irq)
+#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
+#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
+#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
+ struct kvm_assigned_pci_dev)
+#define KVM_ASSIGN_SET_MSIX_NR \
+ _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr)
+#define KVM_ASSIGN_SET_MSIX_ENTRY \
+ _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry)
+#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
/*
* ioctls for vcpu fds
@@ -440,7 +509,8 @@ struct kvm_trace_rec {
#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-#define KVM_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest)
+/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
+#define KVM_DEBUG_GUEST __KVM_DEPRECATED_DEBUG_GUEST
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -469,6 +539,29 @@ struct kvm_trace_rec {
#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
/* Available with KVM_CAP_NMI */
#define KVM_NMI _IO(KVMIO, 0x9a)
+/* Available with KVM_CAP_SET_GUEST_DEBUG */
+#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
+
+/*
+ * Deprecated interfaces
+ */
+struct kvm_breakpoint {
+ __u32 enabled;
+ __u32 padding;
+ __u64 address;
+};
+
+struct kvm_debug_guest {
+ __u32 enabled;
+ __u32 pad;
+ struct kvm_breakpoint breakpoints[4];
+ __u32 singlestep;
+};
+
+#define __KVM_DEPRECATED_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest)
+
+#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
+#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
@@ -495,6 +588,8 @@ struct kvm_trace_rec {
#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+
struct kvm_assigned_pci_dev {
__u32 assigned_dev_id;
__u32 busnr;
@@ -505,6 +600,17 @@ struct kvm_assigned_pci_dev {
};
};
+#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
+#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
+#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
+
+#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
+#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
+#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
+
+#define KVM_DEV_IRQ_HOST_MASK 0x00ff
+#define KVM_DEV_IRQ_GUEST_MASK 0xff00
+
struct kvm_assigned_irq {
__u32 assigned_dev_id;
__u32 host_irq;
@@ -520,8 +626,19 @@ struct kvm_assigned_irq {
};
};
-#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
-#define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0)
+struct kvm_assigned_msix_nr {
+ __u32 assigned_dev_id;
+ __u16 entry_nr;
+ __u16 padding;
+};
+
+#define KVM_MAX_MSIX_PER_DEV 512
+struct kvm_assigned_msix_entry {
+ __u32 assigned_dev_id;
+ __u32 gsi;
+ __u16 entry; /* The index of entry in the MSI-X table */
+ __u16 padding[3];
+};
#endif
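Note on the kvm.h change above: userspace drives the new guest-debug interface through the KVM_SET_GUEST_DEBUG vcpu ioctl. A minimal sketch (the vcpu fd is assumed to come from KVM_CREATE_VCPU; this is illustrative, not part of the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Put a vcpu into single-step mode; returns the ioctl result. */
static int vcpu_enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}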
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bf6f703642f..aacc5449f58 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -37,6 +37,8 @@
#define KVM_REQ_PENDING_TIMER 5
#define KVM_REQ_UNHALT 6
#define KVM_REQ_MMU_SYNC 7
+#define KVM_REQ_KVMCLOCK_UPDATE 8
+#define KVM_REQ_KICK 9
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
@@ -71,9 +73,8 @@ struct kvm_vcpu {
struct mutex mutex;
int cpu;
struct kvm_run *run;
- int guest_mode;
unsigned long requests;
- struct kvm_guest_debug guest_debug;
+ unsigned long guest_debug;
int fpu_active;
int guest_fpu_loaded;
wait_queue_head_t wq;
@@ -107,6 +108,20 @@ struct kvm_memory_slot {
int user_alloc;
};
+struct kvm_kernel_irq_routing_entry {
+ u32 gsi;
+ int (*set)(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int level);
+ union {
+ struct {
+ unsigned irqchip;
+ unsigned pin;
+ } irqchip;
+ struct msi_msg msi;
+ };
+ struct list_head link;
+};
+
struct kvm {
struct mutex lock; /* protects the vcpus array and APIC accesses */
spinlock_t mmu_lock;
@@ -127,6 +142,11 @@ struct kvm {
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+ struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */
+ struct hlist_head mask_notifier_list;
+#endif
+
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
unsigned long mmu_notifier_seq;
@@ -237,7 +257,6 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
-void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -255,8 +274,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state);
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
- struct kvm_debug_guest *dbg);
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
@@ -279,6 +298,7 @@ int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
@@ -300,6 +320,13 @@ struct kvm_irq_ack_notifier {
void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
+#define KVM_ASSIGNED_MSIX_PENDING 0x1
+struct kvm_guest_msix_entry {
+ u32 vector;
+ u16 entry;
+ u16 flags;
+};
+
struct kvm_assigned_dev_kernel {
struct kvm_irq_ack_notifier ack_notifier;
struct work_struct interrupt_work;
@@ -307,28 +334,43 @@ struct kvm_assigned_dev_kernel {
int assigned_dev_id;
int host_busnr;
int host_devfn;
+ unsigned int entries_nr;
int host_irq;
bool host_irq_disabled;
+ struct msix_entry *host_msix_entries;
int guest_irq;
- struct msi_msg guest_msi;
-#define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0)
-#define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1)
-#define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8)
-#define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9)
+ struct kvm_guest_msix_entry *guest_msix_entries;
unsigned long irq_requested_type;
int irq_source_id;
int flags;
struct pci_dev *dev;
struct kvm *kvm;
+ spinlock_t assigned_dev_lock;
};
-void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
+
+struct kvm_irq_mask_notifier {
+ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
+ int irq;
+ struct hlist_node link;
+};
+
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+/* For vcpu->arch.iommu_flags */
+#define KVM_IOMMU_CACHE_COHERENCY 0x1
+
#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
unsigned long npages);
@@ -464,4 +506,21 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
}
#endif
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+
+#define KVM_MAX_IRQ_ROUTES 1024
+
+int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_set_irq_routing(struct kvm *kvm,
+ const struct kvm_irq_routing_entry *entries,
+ unsigned nr,
+ unsigned flags);
+void kvm_free_irq_routing(struct kvm *kvm);
+
+#else
+
+static inline void kvm_free_irq_routing(struct kvm *kvm) {}
+
+#endif
+
#endif
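Note on the kvm_host.h change above: kernel-internal sketch of the new IRQ mask notifier hooks. The callback body and irq number are placeholders; registration would normally happen from arch device-model code (e.g. the PIT):

static void example_mask_notify(struct kvm_irq_mask_notifier *kimn, bool masked)
{
	/* react to the guest masking/unmasking the interrupt line */
}

static struct kvm_irq_mask_notifier example_kimn = {
	.func = example_mask_notify,
};

static void example_device_init(struct kvm *kvm)
{
	kvm_register_irq_mask_notifier(kvm, 0, &example_kimn);
}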
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 9b6f395c962..fb46efbeabe 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -40,17 +40,31 @@ typedef unsigned long hfn_t;
typedef hfn_t pfn_t;
-struct kvm_pio_request {
- unsigned long count;
- int cur_count;
- struct page *guest_pages[2];
- unsigned guest_page_offset;
- int in;
- int port;
- int size;
- int string;
- int down;
- int rep;
+union kvm_ioapic_redirect_entry {
+ u64 bits;
+ struct {
+ u8 vector;
+ u8 delivery_mode:3;
+ u8 dest_mode:1;
+ u8 delivery_status:1;
+ u8 polarity:1;
+ u8 remote_irr:1;
+ u8 trig_mode:1;
+ u8 mask:1;
+ u8 reserve:7;
+ u8 reserved[4];
+ u8 dest_id;
+ } fields;
+};
+
+struct kvm_lapic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 dest_mode;
+ u32 level;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
};
#endif /* __KVM_TYPES_H__ */
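Note on the kvm_types.h change above: the new union is meant to be filled through .bits with the raw 64-bit redirection entry and then decoded through the .fields bitfields. A tiny illustrative sketch:

#include <linux/types.h>

static int redirect_entry_is_masked(u64 raw)
{
	union kvm_ioapic_redirect_entry e;

	e.bits = raw;
	return e.fields.mask;
}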
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 901c2d6377a..b0e99898527 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -9,6 +9,7 @@
#ifndef _INCLUDE_GUARD_LATENCYTOP_H_
#define _INCLUDE_GUARD_LATENCYTOP_H_
+#include <linux/compiler.h>
#ifdef CONFIG_LATENCYTOP
#define LT_SAVECOUNT 32
@@ -24,7 +25,14 @@ struct latency_record {
struct task_struct;
-void account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+extern int latencytop_enabled;
+void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+ if (unlikely(latencytop_enabled))
+ __account_scheduler_latency(task, usecs, inter);
+}
void clear_all_latency_tracing(struct task_struct *p);
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
new file mode 100644
index 00000000000..42f854a1a19
--- /dev/null
+++ b/include/linux/leds-bd2802.h
@@ -0,0 +1,26 @@
+/*
+ * leds-bd2802.h - RGB LED Driver
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Kim Kyuwon <q1.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
+ *
+ */
+#ifndef _LEDS_BD2802_H_
+#define _LEDS_BD2802_H_
+
+struct bd2802_led_platform_data{
+ int reset_gpio;
+ u8 rgb_time;
+};
+
+#define RGB_TIME(slopedown, slopeup, waveform) \
+ ((slopedown) << 6 | (slopeup) << 4 | (waveform))
+
+#endif /* _LEDS_BD2802_H_ */
+
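Note on the new leds-bd2802.h header above: a board-file sketch of the platform data; the GPIO number and RGB_TIME arguments are made-up values for illustration only:

#include <linux/leds-bd2802.h>

static struct bd2802_led_platform_data example_bd2802_pdata = {
	.reset_gpio = 130,			/* assumed board GPIO */
	.rgb_time   = RGB_TIME(2, 1, 3),	/* slope down, slope up, waveform */
};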
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 24489da701e..376fe07732e 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -30,6 +30,7 @@ enum led_brightness {
struct led_classdev {
const char *name;
int brightness;
+ int max_brightness;
int flags;
/* Lower 16 bits reflect status */
@@ -140,7 +141,8 @@ struct gpio_led {
const char *name;
const char *default_trigger;
unsigned gpio;
- u8 active_low;
+ u8 active_low : 1;
+ u8 retain_state_suspended : 1;
};
struct gpio_led_platform_data {
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
new file mode 100644
index 00000000000..33a07116748
--- /dev/null
+++ b/include/linux/leds_pwm.h
@@ -0,0 +1,21 @@
+/*
+ * PWM LED driver data - see drivers/leds/leds-pwm.c
+ */
+#ifndef __LINUX_LEDS_PWM_H
+#define __LINUX_LEDS_PWM_H
+
+struct led_pwm {
+ const char *name;
+ const char *default_trigger;
+ unsigned pwm_id;
+ u8 active_low;
+ unsigned max_brightness;
+ unsigned pwm_period_ns;
+};
+
+struct led_pwm_platform_data {
+ int num_leds;
+ struct led_pwm *leds;
+};
+
+#endif
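Note on the new leds_pwm.h header above: a matching board-file sketch for the PWM LED platform data (names and numbers are illustrative assumptions):

#include <linux/kernel.h>
#include <linux/leds_pwm.h>

static struct led_pwm example_pwm_leds[] = {
	{
		.name           = "example::backlight",	/* assumed LED name */
		.pwm_id         = 0,
		.max_brightness = 255,
		.pwm_period_ns  = 7812500,		/* 128 Hz, for illustration */
	},
};

static struct led_pwm_platform_data example_pwm_led_data = {
	.num_leds = ARRAY_SIZE(example_pwm_leds),
	.leds     = example_pwm_leds,
};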
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 175e63f4a8c..7bc1440fc47 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -30,6 +30,10 @@ struct lguest_data
/* Wallclock time set by the Host. */
struct timespec time;
+ /* Interrupt pending set by the Host. The Guest should do a hypercall
+ * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
+ int irq_pending;
+
/* Async hypercall ring. Instead of directly making hypercalls, we can
* place them in here for processing the next time the Host wants.
* This batching can be quite efficient. */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index a53407a4165..bfefbdf7498 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -57,7 +57,8 @@ enum lguest_req
LHREQ_INITIALIZE, /* + base, pfnlimit, start */
LHREQ_GETDMA, /* No longer used */
LHREQ_IRQ, /* + irq */
- LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
+ LHREQ_BREAK, /* No longer used */
+ LHREQ_EVENTFD, /* + address, fd. */
};
/* The alignment to use between consumer and producer parts of vring.
diff --git a/include/linux/libata.h b/include/linux/libata.h
index dc18b87ed72..3d501db36a2 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -209,6 +209,7 @@ enum {
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
+
/* struct ata_port pflags */
ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
@@ -225,6 +226,9 @@ enum {
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
+ ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
+ ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
+
/* struct ata_queued_cmd flags */
ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
@@ -379,7 +383,7 @@ enum {
ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */
+ ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
/* DMA mask for user DMA control: User visible values; DO NOT
@@ -689,7 +693,10 @@ struct ata_port {
struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
struct ata_port_operations *ops;
spinlock_t *lock;
+ /* Flags owned by the EH context. Only EH should touch these once the
+ port is active */
unsigned long flags; /* ATA_FLAG_xxx */
+ /* Flags that change dynamically, protected by ap->lock */
unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int print_id; /* user visible unique port ID */
unsigned int port_no; /* 0 based port no. inside the host */
@@ -795,6 +802,7 @@ struct ata_port_operations {
ata_reset_fn_t pmp_hardreset;
ata_postreset_fn_t pmp_postreset;
void (*error_handler)(struct ata_port *ap);
+ void (*lost_interrupt)(struct ata_port *ap);
void (*post_internal_cmd)(struct ata_queued_cmd *qc);
/*
@@ -836,6 +844,8 @@ struct ata_port_operations {
void (*bmdma_start)(struct ata_queued_cmd *qc);
void (*bmdma_stop)(struct ata_queued_cmd *qc);
u8 (*bmdma_status)(struct ata_port *ap);
+
+ void (*drain_fifo)(struct ata_queued_cmd *qc);
#endif /* CONFIG_ATA_SFF */
ssize_t (*em_show)(struct ata_port *ap, char *buf);
@@ -1008,6 +1018,9 @@ extern int ata_cable_sata(struct ata_port *ap);
extern int ata_cable_ignore(struct ata_port *ap);
extern int ata_cable_unknown(struct ata_port *ap);
+extern void ata_pio_queue_task(struct ata_port *ap, void *data,
+ unsigned long delay);
+
/* Timing helpers */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
@@ -1572,6 +1585,7 @@ extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_host_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
+extern void ata_sff_lost_interrupt(struct ata_port *ap);
extern void ata_sff_freeze(struct ata_port *ap);
extern void ata_sff_thaw(struct ata_port *ap);
extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline);
@@ -1584,9 +1598,11 @@ extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
+extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
extern void ata_sff_error_handler(struct ata_port *ap);
extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_sff_port_start(struct ata_port *ap);
+extern int ata_sff_port_start32(struct ata_port *ap);
extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
unsigned long xfer_mask);
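Note on the libata.h change above: an SFF driver can opt into the new hooks by pointing them at the generic helpers added here. A sketch (the ops name is illustrative; ata_sff_port_ops is the existing SFF base ops):

static struct ata_port_operations example_sff_ops = {
	.inherits	= &ata_sff_port_ops,
	.lost_interrupt	= ata_sff_lost_interrupt,
	.drain_fifo	= ata_sff_drain_fifo,
};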
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 08a92969c76..ca5bd91d12e 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -32,6 +32,22 @@ struct linux_logo {
const unsigned char *data;
};
+extern const struct linux_logo logo_linux_mono;
+extern const struct linux_logo logo_linux_vga16;
+extern const struct linux_logo logo_linux_clut224;
+extern const struct linux_logo logo_blackfin_vga16;
+extern const struct linux_logo logo_blackfin_clut224;
+extern const struct linux_logo logo_dec_clut224;
+extern const struct linux_logo logo_mac_clut224;
+extern const struct linux_logo logo_parisc_clut224;
+extern const struct linux_logo logo_sgi_clut224;
+extern const struct linux_logo logo_sun_clut224;
+extern const struct linux_logo logo_superh_mono;
+extern const struct linux_logo logo_superh_vga16;
+extern const struct linux_logo logo_superh_clut224;
+extern const struct linux_logo logo_m32r_clut224;
+extern const struct linux_logo logo_spe_clut224;
+
extern const struct linux_logo *fb_find_logo(int depth);
#ifdef CONFIG_FB_LOGO_EXTRA
extern void fb_append_extra_logo(const struct linux_logo *logo,
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
new file mode 100644
index 00000000000..ad651f4e45a
--- /dev/null
+++ b/include/linux/lis3lv02d.h
@@ -0,0 +1,39 @@
+#ifndef __LIS3LV02D_H_
+#define __LIS3LV02D_H_
+
+struct lis3lv02d_platform_data {
+ /* please note: the 'click' feature is only supported for
+ * LIS[32]02DL variants of the chip and will be ignored for
+ * others */
+#define LIS3_CLICK_SINGLE_X (1 << 0)
+#define LIS3_CLICK_DOUBLE_X (1 << 1)
+#define LIS3_CLICK_SINGLE_Y (1 << 2)
+#define LIS3_CLICK_DOUBLE_Y (1 << 3)
+#define LIS3_CLICK_SINGLE_Z (1 << 4)
+#define LIS3_CLICK_DOUBLE_Z (1 << 5)
+ unsigned char click_flags;
+ unsigned char click_thresh_x;
+ unsigned char click_thresh_y;
+ unsigned char click_thresh_z;
+ unsigned char click_time_limit;
+ unsigned char click_latency;
+ unsigned char click_window;
+
+#define LIS3_IRQ1_DISABLE (0 << 0)
+#define LIS3_IRQ1_FF_WU_1 (1 << 0)
+#define LIS3_IRQ1_FF_WU_2 (2 << 0)
+#define LIS3_IRQ1_FF_WU_12 (3 << 0)
+#define LIS3_IRQ1_DATA_READY (4 << 0)
+#define LIS3_IRQ1_CLICK (7 << 0)
+#define LIS3_IRQ2_DISABLE (0 << 3)
+#define LIS3_IRQ2_FF_WU_1 (1 << 3)
+#define LIS3_IRQ2_FF_WU_2 (2 << 3)
+#define LIS3_IRQ2_FF_WU_12 (3 << 3)
+#define LIS3_IRQ2_DATA_READY (4 << 3)
+#define LIS3_IRQ2_CLICK (7 << 3)
+#define LIS3_IRQ_OPEN_DRAIN (1 << 6)
+#define LIS3_IRQ_ACTIVE_HIGH (1 << 7)
+ unsigned char irq_cfg;
+};
+
+#endif /* __LIS3LV02D_H_ */
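Note on the new lis3lv02d.h header above: a board-file sketch of the accelerometer platform data; the thresholds and IRQ wiring are placeholder values:

#include <linux/lis3lv02d.h>

static struct lis3lv02d_platform_data example_lis3_pdata = {
	.click_flags    = LIS3_CLICK_SINGLE_X | LIS3_CLICK_SINGLE_Y |
			  LIS3_CLICK_SINGLE_Z,
	.click_thresh_x = 10,
	.click_thresh_y = 10,
	.click_thresh_z = 10,
	.irq_cfg        = LIS3_IRQ1_CLICK | LIS3_IRQ_ACTIVE_HIGH,
};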
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 93150ecf3ea..5d10ae364b5 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -56,6 +56,18 @@ static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
return is_a_nulls(h->first);
}
+static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ h->first = n;
+ if (!is_a_nulls(first))
+ first->pprev = &n->next;
+}
+
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
{
struct hlist_nulls_node *next = n->next;
@@ -65,6 +77,12 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
next->pprev = pprev;
}
+static inline void hlist_nulls_del(struct hlist_nulls_node *n)
+{
+ __hlist_nulls_del(n);
+ n->pprev = LIST_POISON2;
+}
+
/**
* hlist_nulls_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop cursor.
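Note on the list_nulls.h change above: a minimal sketch of the two new non-RCU helpers; the item type is illustrative and writers are assumed to be serialized externally:

#include <linux/list_nulls.h>

struct example_item {
	struct hlist_nulls_node node;
	int key;
};

static void example_add(struct example_item *item, struct hlist_nulls_head *head)
{
	hlist_nulls_add_head(&item->node, head);
}

static void example_del(struct example_item *item)
{
	hlist_nulls_del(&item->node);	/* poisons ->pprev like hlist_del() */
}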
diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h
deleted file mode 100644
index 2ed8fa1b762..00000000000
--- a/include/linux/lm_interface.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#ifndef __LM_INTERFACE_DOT_H__
-#define __LM_INTERFACE_DOT_H__
-
-
-typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
-
-/*
- * lm_mount() flags
- *
- * LM_MFLAG_SPECTATOR
- * GFS is asking to join the filesystem's lockspace, but it doesn't want to
- * modify the filesystem. The lock module shouldn't assign a journal to the FS
- * mount. It shouldn't send recovery callbacks to the FS mount. If the node
- * dies or withdraws, all locks can be wiped immediately.
- *
- * LM_MFLAG_CONV_NODROP
- * Do not allow the dlm to internally resolve conversion deadlocks by demoting
- * the lock to unlocked and then reacquiring it in the requested mode. Instead,
- * it should cancel the request and return LM_OUT_CONV_DEADLK.
- */
-
-#define LM_MFLAG_SPECTATOR 0x00000001
-#define LM_MFLAG_CONV_NODROP 0x00000002
-
-/*
- * lm_lockstruct flags
- *
- * LM_LSFLAG_LOCAL
- * The lock_nolock module returns LM_LSFLAG_LOCAL to GFS, indicating that GFS
- * can make single-node optimizations.
- */
-
-#define LM_LSFLAG_LOCAL 0x00000001
-
-/*
- * lm_lockname types
- */
-
-#define LM_TYPE_RESERVED 0x00
-#define LM_TYPE_NONDISK 0x01
-#define LM_TYPE_INODE 0x02
-#define LM_TYPE_RGRP 0x03
-#define LM_TYPE_META 0x04
-#define LM_TYPE_IOPEN 0x05
-#define LM_TYPE_FLOCK 0x06
-#define LM_TYPE_PLOCK 0x07
-#define LM_TYPE_QUOTA 0x08
-#define LM_TYPE_JOURNAL 0x09
-
-/*
- * lm_lock() states
- *
- * SHARED is compatible with SHARED, not with DEFERRED or EX.
- * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
- */
-
-#define LM_ST_UNLOCKED 0
-#define LM_ST_EXCLUSIVE 1
-#define LM_ST_DEFERRED 2
-#define LM_ST_SHARED 3
-
-/*
- * lm_lock() flags
- *
- * LM_FLAG_TRY
- * Don't wait to acquire the lock if it can't be granted immediately.
- *
- * LM_FLAG_TRY_1CB
- * Send one blocking callback if TRY is set and the lock is not granted.
- *
- * LM_FLAG_NOEXP
- * GFS sets this flag on lock requests it makes while doing journal recovery.
- * These special requests should not be blocked due to the recovery like
- * ordinary locks would be.
- *
- * LM_FLAG_ANY
- * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
- * also be granted in SHARED. The preferred state is whichever is compatible
- * with other granted locks, or the specified state if no other locks exist.
- *
- * LM_FLAG_PRIORITY
- * Override fairness considerations. Suppose a lock is held in a shared state
- * and there is a pending request for the deferred state. A shared lock
- * request with the priority flag would be allowed to bypass the deferred
- * request and directly join the other shared lock. A shared lock request
- * without the priority flag might be forced to wait until the deferred
- * requested had acquired and released the lock.
- */
-
-#define LM_FLAG_TRY 0x00000001
-#define LM_FLAG_TRY_1CB 0x00000002
-#define LM_FLAG_NOEXP 0x00000004
-#define LM_FLAG_ANY 0x00000008
-#define LM_FLAG_PRIORITY 0x00000010
-
-/*
- * lm_lock() and lm_async_cb return flags
- *
- * LM_OUT_ST_MASK
- * Masks the lower two bits of lock state in the returned value.
- *
- * LM_OUT_CACHEABLE
- * The lock hasn't been released so GFS can continue to cache data for it.
- *
- * LM_OUT_CANCELED
- * The lock request was canceled.
- *
- * LM_OUT_ASYNC
- * The result of the request will be returned in an LM_CB_ASYNC callback.
- *
- * LM_OUT_CONV_DEADLK
- * The lock request was canceled do to a conversion deadlock.
- */
-
-#define LM_OUT_ST_MASK 0x00000003
-#define LM_OUT_CANCELED 0x00000008
-#define LM_OUT_ASYNC 0x00000080
-#define LM_OUT_ERROR 0x00000100
-
-/*
- * lm_callback_t types
- *
- * LM_CB_NEED_E LM_CB_NEED_D LM_CB_NEED_S
- * Blocking callback, a remote node is requesting the given lock in
- * EXCLUSIVE, DEFERRED, or SHARED.
- *
- * LM_CB_NEED_RECOVERY
- * The given journal needs to be recovered.
- *
- * LM_CB_ASYNC
- * The given lock has been granted.
- */
-
-#define LM_CB_NEED_E 257
-#define LM_CB_NEED_D 258
-#define LM_CB_NEED_S 259
-#define LM_CB_NEED_RECOVERY 260
-#define LM_CB_ASYNC 262
-
-/*
- * lm_recovery_done() messages
- */
-
-#define LM_RD_GAVEUP 308
-#define LM_RD_SUCCESS 309
-
-
-struct lm_lockname {
- u64 ln_number;
- unsigned int ln_type;
-};
-
-#define lm_name_equal(name1, name2) \
- (((name1)->ln_number == (name2)->ln_number) && \
- ((name1)->ln_type == (name2)->ln_type)) \
-
-struct lm_async_cb {
- struct lm_lockname lc_name;
- int lc_ret;
-};
-
-struct lm_lockstruct;
-
-struct lm_lockops {
- const char *lm_proto_name;
-
- /*
- * Mount/Unmount
- */
-
- int (*lm_mount) (char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj);
-
- void (*lm_others_may_mount) (void *lockspace);
-
- void (*lm_unmount) (void *lockspace);
-
- void (*lm_withdraw) (void *lockspace);
-
- /*
- * Lock oriented operations
- */
-
- int (*lm_get_lock) (void *lockspace, struct lm_lockname *name, void **lockp);
-
- void (*lm_put_lock) (void *lock);
-
- unsigned int (*lm_lock) (void *lock, unsigned int cur_state,
- unsigned int req_state, unsigned int flags);
-
- unsigned int (*lm_unlock) (void *lock, unsigned int cur_state);
-
- void (*lm_cancel) (void *lock);
-
- int (*lm_hold_lvb) (void *lock, char **lvbp);
- void (*lm_unhold_lvb) (void *lock, char *lvb);
-
- /*
- * Posix Lock oriented operations
- */
-
- int (*lm_plock_get) (void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl);
-
- int (*lm_plock) (void *lockspace, struct lm_lockname *name,
- struct file *file, int cmd, struct file_lock *fl);
-
- int (*lm_punlock) (void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl);
-
- /*
- * Client oriented operations
- */
-
- void (*lm_recovery_done) (void *lockspace, unsigned int jid,
- unsigned int message);
-
- struct module *lm_owner;
-};
-
-/*
- * lm_mount() return values
- *
- * ls_jid - the journal ID this node should use
- * ls_first - this node is the first to mount the file system
- * ls_lvb_size - size in bytes of lock value blocks
- * ls_lockspace - lock module's context for this file system
- * ls_ops - lock module's functions
- * ls_flags - lock module features
- */
-
-struct lm_lockstruct {
- unsigned int ls_jid;
- unsigned int ls_first;
- unsigned int ls_lvb_size;
- void *ls_lockspace;
- const struct lm_lockops *ls_ops;
- int ls_flags;
-};
-
-/*
- * Lock module bottom interface. A lock module makes itself available to GFS
- * with these functions.
- */
-
-int gfs2_register_lockproto(const struct lm_lockops *proto);
-void gfs2_unregister_lockproto(const struct lm_lockops *proto);
-
-/*
- * Lock module top interface. GFS calls these functions when mounting or
- * unmounting a file system.
- */
-
-int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj);
-
-void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct);
-
-void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct);
-
-#endif /* __LM_INTERFACE_DOT_H__ */
-
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 51855dfd8ad..c325b187966 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -195,7 +195,7 @@ extern struct svc_procedure nlmsvc_procedures4[];
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
extern int nsm_use_hostnames;
-extern int nsm_local_state;
+extern u32 nsm_local_state;
/*
* Lockd client functions
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7dc5b6cb44c..d39ed1cc5fb 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -25,13 +25,13 @@ struct svc_rqst;
#define NLM_MAXCOOKIELEN 32
#define NLM_MAXSTRLEN 1024
-#define nlm_granted __constant_htonl(NLM_LCK_GRANTED)
-#define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED)
-#define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS)
-#define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED)
-#define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD)
+#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED)
+#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED)
+#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS)
+#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED)
+#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD)
-#define nlm_drop_reply __constant_htonl(30000)
+#define nlm_drop_reply cpu_to_be32(30000)
/* Lock info passed via NLM */
struct nlm_lock {
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 12bfe09de2b..7353821341e 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -15,11 +15,11 @@
#include <linux/lockd/xdr.h>
/* error codes new to NLMv4 */
-#define nlm4_deadlock __constant_htonl(NLM_DEADLCK)
-#define nlm4_rofs __constant_htonl(NLM_ROFS)
-#define nlm4_stale_fh __constant_htonl(NLM_STALE_FH)
-#define nlm4_fbig __constant_htonl(NLM_FBIG)
-#define nlm4_failed __constant_htonl(NLM_FAILED)
+#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK)
+#define nlm4_rofs cpu_to_be32(NLM_ROFS)
+#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH)
+#define nlm4_fbig cpu_to_be32(NLM_FBIG)
+#define nlm4_failed cpu_to_be32(NLM_FAILED)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124..da5a5a1f4cd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,43 +20,10 @@ struct lockdep_map;
#include <linux/stacktrace.h>
/*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
*/
-enum lock_usage_bit
-{
- LOCK_USED = 0,
- LOCK_USED_IN_HARDIRQ,
- LOCK_USED_IN_SOFTIRQ,
- LOCK_ENABLED_SOFTIRQS,
- LOCK_ENABLED_HARDIRQS,
- LOCK_USED_IN_HARDIRQ_READ,
- LOCK_USED_IN_SOFTIRQ_READ,
- LOCK_ENABLED_SOFTIRQS_READ,
- LOCK_ENABLED_HARDIRQS_READ,
- LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED (1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
-
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
-
-#define LOCKF_ENABLED_IRQS_READ \
- (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
-#define LOCKF_USED_IN_IRQ_READ \
- (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES (1+3*4)
#define MAX_LOCKDEP_SUBCLASSES 8UL
@@ -97,7 +64,7 @@ struct lock_class {
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
- struct stack_trace usage_traces[LOCK_USAGE_STATES];
+ struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
* These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
-# define INIT_LOCKDEP .lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
# define lock_release(l, n, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
+# define lockdep_set_current_reclaim_state(g) do { } while (0)
+# define lockdep_clear_current_reclaim_state() do { } while (0)
+# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
@@ -390,6 +364,23 @@ do { \
#endif /* CONFIG_LOCK_STAT */
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * On lockdep we dont want the hand-coded irq-enable of
+ * _raw_*_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+ LOCK_CONTENDED((_lock), (try), (lock))
+
+#else /* CONFIG_LOCKDEP */
+
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+ lockfl((_lock), (flags))
+
+#endif /* CONFIG_LOCKDEP */
+
#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
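Note on the lockdep.h change above: the reclaim-state hooks are meant to bracket code that performs memory reclaim so lockdep can check GFP-related lock ordering. A minimal sketch of the intended call pattern (the surrounding function is illustrative):

#include <linux/lockdep.h>
#include <linux/gfp.h>

static void example_reclaim_pass(void)
{
	lockdep_set_current_reclaim_state(GFP_KERNEL);
	/* ... do reclaim work that must obey GFP_KERNEL locking rules ... */
	lockdep_clear_current_reclaim_state();
}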
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 6ffd6db5bb0..66c194e2d9b 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
gfp_t old_gfp_mask;
spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
+ struct bio_list lo_bio_list;
int lo_state;
struct mutex lo_ctl_mutex;
struct task_struct *lo_thread;
@@ -160,5 +159,6 @@ int loop_unregister_transfer(int number);
#define LOOP_SET_STATUS64 0x4C04
#define LOOP_GET_STATUS64 0x4C05
#define LOOP_CHANGE_FD 0x4C06
+#define LOOP_SET_CAPACITY 0x4C07
#endif
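Note on the loop.h change above: userspace sketch for the new LOOP_SET_CAPACITY ioctl, which asks the driver to re-read the size of the backing file and resize the loop device (the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, LOOP_SET_CAPACITY, 0) < 0) {
		perror("LOOP_SET_CAPACITY");
		return 1;
	}
	return 0;
}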
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
new file mode 100644
index 00000000000..e461b2c3d71
--- /dev/null
+++ b/include/linux/lsm_audit.h
@@ -0,0 +1,111 @@
+/*
+ * Common LSM logging functions
+ * Heavily borrowed from selinux/avc.h
+ *
+ * Author : Etienne BASSET <etienne.basset@ensta.org>
+ *
+ * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil>
+ * All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
+ */
+#ifndef _LSM_COMMON_LOGGING_
+#define _LSM_COMMON_LOGGING_
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/audit.h>
+#include <linux/in6.h>
+#include <linux/path.h>
+#include <linux/key.h>
+#include <linux/skbuff.h>
+#include <asm/system.h>
+
+
+/* Auxiliary data to use in generating the audit record. */
+struct common_audit_data {
+ char type;
+#define LSM_AUDIT_DATA_FS 1
+#define LSM_AUDIT_DATA_NET 2
+#define LSM_AUDIT_DATA_CAP 3
+#define LSM_AUDIT_DATA_IPC 4
+#define LSM_AUDIT_DATA_TASK 5
+#define LSM_AUDIT_DATA_KEY 6
+ struct task_struct *tsk;
+ union {
+ struct {
+ struct path path;
+ struct inode *inode;
+ } fs;
+ struct {
+ int netif;
+ struct sock *sk;
+ u16 family;
+ __be16 dport;
+ __be16 sport;
+ union {
+ struct {
+ __be32 daddr;
+ __be32 saddr;
+ } v4;
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ } v6;
+ } fam;
+ } net;
+ int cap;
+ int ipc_id;
+ struct task_struct *tsk;
+#ifdef CONFIG_KEYS
+ struct {
+ key_serial_t key;
+ char *key_desc;
+ } key_struct;
+#endif
+ } u;
+ const char *function;
+ /* this union contains LSM specific data */
+ union {
+ /* SMACK data */
+ struct smack_audit_data {
+ char *subject;
+ char *object;
+ char *request;
+ int result;
+ } smack_audit_data;
+ /* SELinux data */
+ struct {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ u32 requested;
+ u32 audited;
+ struct av_decision *avd;
+ int result;
+ } selinux_audit_data;
+ } lsm_priv;
+ /* these callback will be implemented by a specific LSM */
+ void (*lsm_pre_audit)(struct audit_buffer *, void *);
+ void (*lsm_post_audit)(struct audit_buffer *, void *);
+};
+
+#define v4info fam.v4
+#define v6info fam.v6
+
+int ipv4_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto);
+
+int ipv6_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto);
+
+/* Initialize an LSM audit data structure. */
+#define COMMON_AUDIT_DATA_INIT(_d, _t) \
+ { memset((_d), 0, sizeof(struct common_audit_data)); \
+ (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; }
+
+void common_lsm_audit(struct common_audit_data *a);
+
+#endif
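Note on the new lsm_audit.h header above: kernel-internal sketch of the common logging helpers. A real LSM would also fill in the lsm_pre_audit/lsm_post_audit callbacks and its lsm_priv data, which are omitted here for brevity:

static void example_audit_capability(struct task_struct *tsk, int cap)
{
	struct common_audit_data ad;

	COMMON_AUDIT_DATA_INIT(&ad, CAP);
	ad.tsk = tsk;
	ad.u.cap = cap;
	common_lsm_audit(&ad);
}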
diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h
index 6b71d2dce50..41d1eeb9b3b 100644
--- a/include/linux/mISDNdsp.h
+++ b/include/linux/mISDNdsp.h
@@ -12,7 +12,8 @@ struct mISDN_dsp_element {
void *(*new)(const char *arg);
void (*free)(void *p);
void (*process_tx)(void *p, unsigned char *data, int len);
- void (*process_rx)(void *p, unsigned char *data, int len);
+ void (*process_rx)(void *p, unsigned char *data, int len,
+ unsigned int txlen);
int num_args;
struct mISDN_dsp_element_arg
*args;
@@ -24,6 +25,7 @@ extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem);
struct dsp_features {
int hfc_id; /* unique id to identify the chip (or -1) */
int hfc_dtmf; /* set if HFCmulti card supports dtmf */
+ int hfc_conf; /* set if HFCmulti card supports conferences */
int hfc_loops; /* set if card supports tone loops */
int hfc_echocanhw; /* set if card supports echocancelation*/
int pcm_id; /* unique id to identify the pcm bus (or -1) */
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index 97ffdc1d344..7f9831da847 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -89,11 +89,6 @@ struct dchannel {
void (*phfunc) (struct dchannel *);
u_int state;
void *l1;
- /* HW access */
- u_char (*read_reg) (void *, u_char);
- void (*write_reg) (void *, u_char, u_char);
- void (*read_fifo) (void *, u_char *, int);
- void (*write_fifo) (void *, u_char *, int);
void *hw;
int slot; /* multiport card channel slot */
struct timer_list timer;
@@ -151,11 +146,6 @@ struct bchannel {
u_long Flags;
struct work_struct workq;
u_int state;
- /* HW access */
- u_char (*read_reg) (void *, u_char);
- void (*write_reg) (void *, u_char, u_char);
- void (*read_fifo) (void *, u_char *, int);
- void (*write_fifo) (void *, u_char *, int);
void *hw;
int slot; /* multiport card channel slot */
struct timer_list timer;
@@ -185,7 +175,7 @@ extern int dchannel_senddata(struct dchannel *, struct sk_buff *);
extern int bchannel_senddata(struct bchannel *, struct sk_buff *);
extern void recv_Dchannel(struct dchannel *);
extern void recv_Echannel(struct dchannel *, struct dchannel *);
-extern void recv_Bchannel(struct bchannel *);
+extern void recv_Bchannel(struct bchannel *, unsigned int id);
extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
extern void confirm_Bsend(struct bchannel *bch);
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 5da3d95b27f..45100b39a7c 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -229,6 +229,7 @@
#define OPTION_L2_PTP 2
#define OPTION_L2_FIXEDTEI 3
#define OPTION_L2_CLEANUP 4
+#define OPTION_L1_HOLD 5
/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */
#define MISDN_MAX_IDLEN 20
@@ -291,19 +292,19 @@ struct mISDN_devrename {
/* MPH_INFORMATION_REQ payload */
struct ph_info_ch {
- __u32 protocol;
- __u64 Flags;
+ __u32 protocol;
+ __u64 Flags;
};
struct ph_info_dch {
- struct ph_info_ch ch;
- __u16 state;
- __u16 num_bch;
+ struct ph_info_ch ch;
+ __u16 state;
+ __u16 num_bch;
};
struct ph_info {
- struct ph_info_dch dch;
- struct ph_info_ch bch[];
+ struct ph_info_dch dch;
+ struct ph_info_ch bch[];
};
/* timer device ioctl */
@@ -317,6 +318,7 @@ struct ph_info {
#define IMCTRLREQ _IOR('I', 69, int)
#define IMCLEAR_L2 _IOR('I', 70, int)
#define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename)
+#define IMHOLD_L1 _IOR('I', 72, int)
static inline int
test_channelmap(u_int nr, u_char *map)
@@ -362,7 +364,8 @@ clear_channelmap(u_int nr, u_char *map)
#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006
#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007
#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008
-
+#define MISDN_CTRL_HFC_WD_INIT 0x4009
+#define MISDN_CTRL_HFC_WD_RESET 0x400A
/* socket options */
#define MISDN_TIME_STAMP 0x0001
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 0b4df7eba85..1923327b986 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -6,9 +6,12 @@
#define AFS_SUPER_MAGIC 0x5346414F
#define AUTOFS_SUPER_MAGIC 0x0187
#define CODA_SUPER_MAGIC 0x73757245
+#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
+#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */
#define DEBUGFS_MAGIC 0x64626720
#define SYSFS_MAGIC 0x62656572
#define SECURITYFS_MAGIC 0x73636673
+#define SELINUX_MAGIC 0xf97cff8c
#define TMPFS_MAGIC 0x01021994
#define SQUASHFS_MAGIC 0x73717368
#define EFS_SUPER_MAGIC 0x414A53
@@ -49,4 +52,5 @@
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
#define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA
+#define STACK_END_MAGIC 0x57AC6E9D
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/linux/major.h b/include/linux/major.h
index 88249452b93..6a8ca98c9a9 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -145,6 +145,7 @@
#define UNIX98_PTY_MAJOR_COUNT 8
#define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT)
+#define DRBD_MAJOR 147
#define RTF_MAJOR 150
#define RAW_MAJOR 162
@@ -171,5 +172,6 @@
#define VIOTAPE_MAJOR 230
#define BLOCK_EXT_MAJOR 259
+#define SCSI_OSD_MAJOR 260 /* open-osd's OSD scsi device */
#endif
diff --git a/include/linux/maple.h b/include/linux/maple.h
index c23d3f51ba4..d9a51b9b330 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -8,33 +8,49 @@ extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
- MAPLE_RESPONSE_FILEERR = -5,
- MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */
- MAPLE_RESPONSE_BADCMD = -3,
- MAPLE_RESPONSE_BADFUNC = -2,
- MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */
- MAPLE_COMMAND_DEVINFO = 1,
- MAPLE_COMMAND_ALLINFO = 2,
- MAPLE_COMMAND_RESET = 3,
- MAPLE_COMMAND_KILL = 4,
- MAPLE_RESPONSE_DEVINFO = 5,
- MAPLE_RESPONSE_ALLINFO = 6,
- MAPLE_RESPONSE_OK = 7,
- MAPLE_RESPONSE_DATATRF = 8,
- MAPLE_COMMAND_GETCOND = 9,
- MAPLE_COMMAND_GETMINFO = 10,
- MAPLE_COMMAND_BREAD = 11,
- MAPLE_COMMAND_BWRITE = 12,
- MAPLE_COMMAND_SETCOND = 14
+ MAPLE_RESPONSE_FILEERR = -5,
+ MAPLE_RESPONSE_AGAIN, /* retransmit */
+ MAPLE_RESPONSE_BADCMD,
+ MAPLE_RESPONSE_BADFUNC,
+ MAPLE_RESPONSE_NONE, /* unit didn't respond*/
+ MAPLE_COMMAND_DEVINFO = 1,
+ MAPLE_COMMAND_ALLINFO,
+ MAPLE_COMMAND_RESET,
+ MAPLE_COMMAND_KILL,
+ MAPLE_RESPONSE_DEVINFO,
+ MAPLE_RESPONSE_ALLINFO,
+ MAPLE_RESPONSE_OK,
+ MAPLE_RESPONSE_DATATRF,
+ MAPLE_COMMAND_GETCOND,
+ MAPLE_COMMAND_GETMINFO,
+ MAPLE_COMMAND_BREAD,
+ MAPLE_COMMAND_BWRITE,
+ MAPLE_COMMAND_BSYNC,
+ MAPLE_COMMAND_SETCOND,
+ MAPLE_COMMAND_MICCONTROL
+};
+
+enum maple_file_errors {
+ MAPLE_FILEERR_INVALID_PARTITION = 0x01000000,
+ MAPLE_FILEERR_PHASE_ERROR = 0x02000000,
+ MAPLE_FILEERR_INVALID_BLOCK = 0x04000000,
+ MAPLE_FILEERR_WRITE_ERROR = 0x08000000,
+ MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000,
+ MAPLE_FILEERR_BAD_CRC = 0x20000000
+};
+
+struct maple_buffer {
+ char bufx[0x400];
+ void *buf;
};
struct mapleq {
struct list_head list;
struct maple_device *dev;
- void *sendbuf, *recvbuf, *recvbufdcsp;
+ struct maple_buffer *recvbuf;
+ void *sendbuf, *recvbuf_p2;
unsigned char length;
enum maple_code command;
- struct mutex mutex;
};
struct maple_devinfo {
@@ -52,11 +68,15 @@ struct maple_device {
struct maple_driver *driver;
struct mapleq *mq;
void (*callback) (struct mapleq * mq);
+ void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf);
+ int (*can_unload)(struct maple_device *mdev);
unsigned long when, interval, function;
struct maple_devinfo devinfo;
unsigned char port, unit;
char product_name[32];
char product_licence[64];
+ atomic_t busy;
+ wait_queue_head_t maple_wait;
struct device dev;
};
@@ -72,7 +92,7 @@ void maple_getcond_callback(struct maple_device *dev,
int maple_driver_register(struct maple_driver *);
void maple_driver_unregister(struct maple_driver *);
-int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
+int maple_add_packet(struct maple_device *mdev, u32 function,
u32 command, u32 length, void *data);
void maple_clear_dev(struct maple_device *mdev);
diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h
index 404f678e734..2203121a43e 100644
--- a/include/linux/matroxfb.h
+++ b/include/linux/matroxfb.h
@@ -37,7 +37,7 @@ enum matroxfb_ctrl_id {
MATROXFB_CID_LAST
};
-#define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t)
+#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
#endif
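Note on the matroxfb.h change above: the FBIO_WAITFORVSYNC argument is now explicitly __u32. A userspace sketch (the framebuffer node and output index are assumptions):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/matroxfb.h>

int main(void)
{
	__u32 output = 0;			/* assumed primary output */
	int fd = open("/dev/fb0", O_RDWR);	/* assumed framebuffer node */

	if (fd < 0)
		return 1;
	return ioctl(fd, FBIO_WAITFORVSYNC, &output) ? 1 : 0;
}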
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
new file mode 100644
index 00000000000..cfdf1df2875
--- /dev/null
+++ b/include/linux/mdio.h
@@ -0,0 +1,356 @@
+/*
+ * linux/mdio.h: definitions for MDIO (clause 45) transceivers
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef __LINUX_MDIO_H__
+#define __LINUX_MDIO_H__
+
+#include <linux/mii.h>
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment/
+ * Physical Medium Dependent */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136. */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
+#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
+#define MDIO_PMA_LASI_TXCTRL 0x9001 /* TX_ALARM control */
+#define MDIO_PMA_LASI_CTRL 0x9002 /* LASI control */
+#define MDIO_PMA_LASI_RXSTAT 0x9003 /* RX_ALARM status */
+#define MDIO_PMA_LASI_TXSTAT 0x9004 /* TX_ALARM status */
+#define MDIO_PMA_LASI_STAT 0x9005 /* LASI status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+
+/* 10 Gb/s */
+#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
+/* 10PASS-TS/2BASE-TL */
+#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04)
+
+/* Status register 1. */
+#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+#define MDIO_STAT1_FAULT 0x0080 /* Fault */
+#define MDIO_AN_STAT1_LPABLE 0x0001 /* Link partner AN ability */
+#define MDIO_AN_STAT1_ABLE BMSR_ANEGCAPABLE
+#define MDIO_AN_STAT1_RFAULT BMSR_RFAULT
+#define MDIO_AN_STAT1_COMPLETE BMSR_ANEGCOMPLETE
+#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */
+#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */
+
+/* Speed register. */
+#define MDIO_SPEED_10G 0x0001 /* 10G capable */
+#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */
+#define MDIO_PMA_SPEED_10P 0x0004 /* 10PASS-TS capable */
+#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
+#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
+#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
+
+/* Device present registers. */
+#define MDIO_DEVS_PRESENT(devad) (1 << (devad))
+#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD)
+#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS)
+#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS)
+#define MDIO_DEVS_PHYXS MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS)
+#define MDIO_DEVS_DTEXS MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS)
+#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC)
+#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN)
+#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT)
+
+/* Control register 2. */
+#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */
+#define MDIO_PMA_CTRL2_10GBCX4 0x0000 /* 10GBASE-CX4 type */
+#define MDIO_PMA_CTRL2_10GBEW 0x0001 /* 10GBASE-EW type */
+#define MDIO_PMA_CTRL2_10GBLW 0x0002 /* 10GBASE-LW type */
+#define MDIO_PMA_CTRL2_10GBSW 0x0003 /* 10GBASE-SW type */
+#define MDIO_PMA_CTRL2_10GBLX4 0x0004 /* 10GBASE-LX4 type */
+#define MDIO_PMA_CTRL2_10GBER 0x0005 /* 10GBASE-ER type */
+#define MDIO_PMA_CTRL2_10GBLR 0x0006 /* 10GBASE-LR type */
+#define MDIO_PMA_CTRL2_10GBSR 0x0007 /* 10GBASE-SR type */
+#define MDIO_PMA_CTRL2_10GBLRM 0x0008 /* 10GBASE-LRM type */
+#define MDIO_PMA_CTRL2_10GBT 0x0009 /* 10GBASE-T type */
+#define MDIO_PMA_CTRL2_10GBKX4 0x000a /* 10GBASE-KX4 type */
+#define MDIO_PMA_CTRL2_10GBKR 0x000b /* 10GBASE-KR type */
+#define MDIO_PMA_CTRL2_1000BT 0x000c /* 1000BASE-T type */
+#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */
+#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */
+#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
+#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
+#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
+#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
+#define MDIO_PCS_CTRL2_10GBW 0x0002 /* 10GBASE-W type */
+#define MDIO_PCS_CTRL2_10GBT 0x0003 /* 10GBASE-T type */
+
+/* Status register 2. */
+#define MDIO_STAT2_RXFAULT 0x0400 /* Receive fault */
+#define MDIO_STAT2_TXFAULT 0x0800 /* Transmit fault */
+#define MDIO_STAT2_DEVPRST 0xc000 /* Device present */
+#define MDIO_STAT2_DEVPRST_VAL 0x8000 /* Device present value */
+#define MDIO_PMA_STAT2_LBABLE 0x0001 /* PMA loopback ability */
+#define MDIO_PMA_STAT2_10GBEW 0x0002 /* 10GBASE-EW ability */
+#define MDIO_PMA_STAT2_10GBLW 0x0004 /* 10GBASE-LW ability */
+#define MDIO_PMA_STAT2_10GBSW 0x0008 /* 10GBASE-SW ability */
+#define MDIO_PMA_STAT2_10GBLX4 0x0010 /* 10GBASE-LX4 ability */
+#define MDIO_PMA_STAT2_10GBER 0x0020 /* 10GBASE-ER ability */
+#define MDIO_PMA_STAT2_10GBLR 0x0040 /* 10GBASE-LR ability */
+#define MDIO_PMA_STAT2_10GBSR 0x0080 /* 10GBASE-SR ability */
+#define MDIO_PMD_STAT2_TXDISAB 0x0100 /* PMD TX disable ability */
+#define MDIO_PMA_STAT2_EXTABLE 0x0200 /* Extended abilities */
+#define MDIO_PMA_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */
+#define MDIO_PMA_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */
+#define MDIO_PCS_STAT2_10GBR 0x0001 /* 10GBASE-R capable */
+#define MDIO_PCS_STAT2_10GBX 0x0002 /* 10GBASE-X capable */
+#define MDIO_PCS_STAT2_10GBW 0x0004 /* 10GBASE-W capable */
+#define MDIO_PCS_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */
+#define MDIO_PCS_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */
+
+/* Transmit disable register. */
+#define MDIO_PMD_TXDIS_GLOBAL 0x0001 /* Global PMD TX disable */
+#define MDIO_PMD_TXDIS_0 0x0002 /* PMD TX disable 0 */
+#define MDIO_PMD_TXDIS_1 0x0004 /* PMD TX disable 1 */
+#define MDIO_PMD_TXDIS_2 0x0008 /* PMD TX disable 2 */
+#define MDIO_PMD_TXDIS_3 0x0010 /* PMD TX disable 3 */
+
+/* Receive signal detect register. */
+#define MDIO_PMD_RXDET_GLOBAL 0x0001 /* Global PMD RX signal detect */
+#define MDIO_PMD_RXDET_0 0x0002 /* PMD RX signal detect 0 */
+#define MDIO_PMD_RXDET_1 0x0004 /* PMD RX signal detect 1 */
+#define MDIO_PMD_RXDET_2 0x0008 /* PMD RX signal detect 2 */
+#define MDIO_PMD_RXDET_3 0x0010 /* PMD RX signal detect 3 */
+
+/* Extended abilities register. */
+#define MDIO_PMA_EXTABLE_10GCX4 0x0001 /* 10GBASE-CX4 ability */
+#define MDIO_PMA_EXTABLE_10GBLRM 0x0002 /* 10GBASE-LRM ability */
+#define MDIO_PMA_EXTABLE_10GBT 0x0004 /* 10GBASE-T ability */
+#define MDIO_PMA_EXTABLE_10GBKX4 0x0008 /* 10GBASE-KX4 ability */
+#define MDIO_PMA_EXTABLE_10GBKR 0x0010 /* 10GBASE-KR ability */
+#define MDIO_PMA_EXTABLE_1000BT 0x0020 /* 1000BASE-T ability */
+#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
+#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
+#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+
+/* PHY XGXS lane state register. */
+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+/* PMA 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_SWAPPOL_ABNX 0x0001 /* Pair A/B uncrossed */
+#define MDIO_PMA_10GBT_SWAPPOL_CDNX 0x0002 /* Pair C/D uncrossed */
+#define MDIO_PMA_10GBT_SWAPPOL_AREV 0x0100 /* Pair A polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_BREV 0x0200 /* Pair B polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_CREV 0x0400 /* Pair C polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_DREV 0x0800 /* Pair D polarity reversed */
+
+/* PMA 10GBASE-T TX power register. */
+#define MDIO_PMA_10GBT_TXPWR_SHORT 0x0001 /* Short-reach mode */
+
+/* PMA 10GBASE-T SNR registers. */
+/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */
+#define MDIO_PMA_10GBT_SNR_BIAS 0x8000
+#define MDIO_PMA_10GBT_SNR_MAX 127
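The encoding described above maps directly onto a one-line decode step; a minimal sketch (the helper name is illustrative, not part of this header):

/* Illustrative only: turn an SNR register reading back into dB. */
static inline int mdio_pma_10gbt_snr_to_db(u16 regval)
{
	/* Register value is the margin in dB plus 0x8000, so subtract the
	 * bias; the PHY clamps the result to [-127, 127]. */
	return (int)regval - MDIO_PMA_10GBT_SNR_BIAS;
}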
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+/* PCS 10GBASE-R/-T status register 1. */
+#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */
+
+/* PCS 10GBASE-R/-T status register 2. */
+#define MDIO_PCS_10GBRT_STAT2_ERR 0x00ff
+#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
+
+/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
+
+/* AN 10GBASE-T status register. */
+#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */
+#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */
+#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */
+#define MDIO_AN_10GBT_STAT_REMOK 0x1000 /* Remote OK */
+#define MDIO_AN_10GBT_STAT_LOCOK 0x2000 /* Local OK */
+#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
+#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
+
+/* LASI RX_ALARM control/status registers. */
+#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
+#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */
+#define MDIO_PMA_LASI_RX_PMALFLT 0x0010 /* PMA/PMD RX local fault */
+#define MDIO_PMA_LASI_RX_OPTICPOWERFLT 0x0020 /* RX optical power fault */
+#define MDIO_PMA_LASI_RX_WISLFLT 0x0200 /* WIS local fault */
+
+/* LASI TX_ALARM control/status registers. */
+#define MDIO_PMA_LASI_TX_PHYXSLFLT 0x0001 /* PHY XS TX local fault */
+#define MDIO_PMA_LASI_TX_PCSLFLT 0x0008 /* PCS TX local fault */
+#define MDIO_PMA_LASI_TX_PMALFLT 0x0010 /* PMA/PMD TX local fault */
+#define MDIO_PMA_LASI_TX_LASERPOWERFLT 0x0080 /* Laser output power fault */
+#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100 /* Laser temperature fault */
+#define MDIO_PMA_LASI_TX_LASERBICURRFLT 0x0200 /* Laser bias current fault */
+
+/* LASI control/status registers. */
+#define MDIO_PMA_LASI_LSALARM 0x0001 /* LS_ALARM enable/status */
+#define MDIO_PMA_LASI_TXALARM 0x0002 /* TX_ALARM enable/status */
+#define MDIO_PMA_LASI_RXALARM 0x0004 /* RX_ALARM enable/status */
+
+/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */
+
+#define MDIO_PHY_ID_C45 0x8000
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#define MDIO_PHY_ID_DEVAD 0x001f
+#define MDIO_PHY_ID_C45_MASK \
+ (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD)
+
+static inline __u16 mdio_phy_id_c45(int prtad, int devad)
+{
+ return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
+}
+
+static inline bool mdio_phy_id_is_c45(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
+}
+
+static inline __u16 mdio_phy_id_prtad(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+}
+
+static inline __u16 mdio_phy_id_devad(int phy_id)
+{
+ return phy_id & MDIO_PHY_ID_DEVAD;
+}
+
+#define MDIO_SUPPORTS_C22 1
+#define MDIO_SUPPORTS_C45 2
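As a quick check of the packing above, the helpers round-trip cleanly; a minimal sketch with arbitrary PRTAD/DEVAD values:

/* Illustrative only: pack PRTAD 5 / DEVAD 1 into a clause 45 phy_id and
 * recover the fields again (always returns true). */
static inline bool mdio_phy_id_example(void)
{
	__u16 phy_id = mdio_phy_id_c45(5, 1);	/* 0x8000 | (5 << 5) | 1 == 0x80a1 */

	return mdio_phy_id_is_c45(phy_id) &&
	       mdio_phy_id_prtad(phy_id) == 5 &&
	       mdio_phy_id_devad(phy_id) == 1;
}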
+
+#ifdef __KERNEL__
+
+/**
+ * struct mdio_if_info - Ethernet controller MDIO interface
+ * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown)
+ * @mmds: Mask of MMDs expected to be present in the PHY. This must be
+ * non-zero unless @prtad = %MDIO_PRTAD_NONE.
+ * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then
+ * MII register access will be passed through with @devad =
+ * %MDIO_DEVAD_NONE. If %MDIO_EMULATE_C22 is set then access to
+ * commonly used clause 22 registers will be translated into
+ * clause 45 registers.
+ * @dev: Net device structure
+ * @mdio_read: Register read function; returns value or negative error code
+ * @mdio_write: Register write function; returns 0 or negative error code
+ */
+struct mdio_if_info {
+ int prtad;
+ u32 __bitwise mmds;
+ unsigned mode_support;
+
+ struct net_device *dev;
+ int (*mdio_read)(struct net_device *dev, int prtad, int devad,
+ u16 addr);
+ int (*mdio_write)(struct net_device *dev, int prtad, int devad,
+ u16 addr, u16 val);
+};
+
+#define MDIO_PRTAD_NONE (-1)
+#define MDIO_DEVAD_NONE (-1)
+#define MDIO_EMULATE_C22 4
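A consumer typically fills in one of these structures with its own register accessors and then hands it to the mdio45_* helpers declared below; a minimal sketch, where foo_mdio_read()/foo_mdio_write() are hypothetical driver functions matching the callback signatures above:

/* Minimal sketch only -- foo_mdio_read()/foo_mdio_write() are hypothetical
 * driver helpers with the mdio_read/mdio_write signatures above. */
static void foo_mdio_init(struct mdio_if_info *mdio, struct net_device *dev)
{
	mdio->prtad = 0;			/* PHY at port address 0 */
	mdio->mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_AN;
	mdio->mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	mdio->dev = dev;
	mdio->mdio_read = foo_mdio_read;
	mdio->mdio_write = foo_mdio_write;
}

With the structure populated like this, calls such as mdio45_probe() and mdio45_links_ok() below can operate on it.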
+
+struct ethtool_cmd;
+struct ethtool_pauseparam;
+extern int mdio45_probe(struct mdio_if_info *mdio, int prtad);
+extern int mdio_set_flag(const struct mdio_if_info *mdio,
+ int prtad, int devad, u16 addr, int mask,
+ bool sense);
+extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
+extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
+extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd,
+ u32 npage_adv, u32 npage_lpa);
+extern void
+mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
+ const struct ethtool_pauseparam *ecmd);
+
+/**
+ * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
+ * @mdio: MDIO interface
+ * @ecmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. Use
+ * mdio45_ethtool_gset_npage() to specify advertisement bits from next
+ * pages.
+ */
+static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
+}
+
+extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_MDIO_H__ */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 326f45c8653..e46a0734ab6 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -56,7 +56,7 @@ extern void mem_cgroup_move_lists(struct page *page,
enum lru_list from, enum lru_list to);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shrink_usage(struct page *page,
+extern int mem_cgroup_shmem_charge_fallback(struct page *page,
struct mm_struct *mm, gfp_t gfp_mask);
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -75,7 +75,7 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
struct mem_cgroup *mem;
rcu_read_lock();
- mem = mem_cgroup_from_task((mm)->owner);
+ mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
rcu_read_unlock();
return cgroup == mem;
}
@@ -88,15 +88,13 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
/*
* For memory reclaim.
*/
-extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
-extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
-
extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
int priority);
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
+int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
struct zone *zone,
enum lru_list lru);
@@ -104,6 +102,8 @@ struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ struct task_struct *p);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
@@ -117,7 +117,7 @@ static inline bool mem_cgroup_disabled(void)
}
extern bool mem_cgroup_oom_called(struct task_struct *task);
-
+void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;
@@ -156,7 +156,7 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}
-static inline int mem_cgroup_shrink_usage(struct page *page,
+static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
struct mm_struct *mm, gfp_t gfp_mask)
{
return 0;
@@ -209,16 +209,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
{
}
-static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
-{
- return 0;
-}
-
-static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
-{
- return 0;
-}
-
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
return 0;
@@ -250,6 +240,12 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
return 1;
}
+static inline int
+mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
+{
+ return 1;
+}
+
static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
enum lru_list lru)
@@ -270,6 +266,16 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
return NULL;
}
+static inline void
+mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+{
+}
+
+static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+ int val)
+{
+}
+
#endif /* CONFIG_CGROUP_MEM_CONT */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 3fdc10806d3..37fa19b34ef 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -99,4 +99,21 @@ enum mem_add_context { BOOT, HOTPLUG };
#define hotplug_memory_notifier(fn, pri) do { } while (0)
#endif
+/*
+ * 'struct memory_accessor' is a generic interface to provide
+ * in-kernel access to persistent memory such as I2C or SPI EEPROMs
+ */
+struct memory_accessor {
+ ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
+ size_t count);
+ ssize_t (*write)(struct memory_accessor *, const char *buf,
+ off_t offset, size_t count);
+};
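Client drivers are handed a memory_accessor by the EEPROM driver (typically through a setup callback) and call through it directly; a minimal sketch with illustrative names:

/* Minimal sketch: pull 16 bytes out of an EEPROM through a memory_accessor
 * obtained from the EEPROM driver (names are illustrative only). */
static ssize_t foo_read_serial(struct memory_accessor *macc, char *buf)
{
	/* Returns bytes read or a negative errno, per the usual convention. */
	return macc->read(macc, buf, 0, 16);
}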
+
+/*
+ * Kernel text modification mutex, used for code patching. Users of this lock
+ * can sleep.
+ */
+extern struct mutex text_mutex;
+
#endif /* _LINUX_MEMORY_H_ */
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
new file mode 100644
index 00000000000..7a3f316e384
--- /dev/null
+++ b/include/linux/mfd/ab3100.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * AB3100 core access functions
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+
+#include <linux/device.h>
+
+#ifndef MFD_AB3100_H
+#define MFD_AB3100_H
+
+#define ABUNKNOWN 0
+#define AB3000 1
+#define AB3100 2
+
+/*
+ * AB3100 EVENTA1, A2 and A3 event register flags.
+ * These are concatenated into a single 32-bit flag in the code
+ * for event notification broadcasts.
+ */
+#define AB3100_EVENTA1_ONSWA (0x01<<16)
+#define AB3100_EVENTA1_ONSWB (0x02<<16)
+#define AB3100_EVENTA1_ONSWC (0x04<<16)
+#define AB3100_EVENTA1_DCIO (0x08<<16)
+#define AB3100_EVENTA1_OVER_TEMP (0x10<<16)
+#define AB3100_EVENTA1_SIM_OFF (0x20<<16)
+#define AB3100_EVENTA1_VBUS (0x40<<16)
+#define AB3100_EVENTA1_VSET_USB (0x80<<16)
+
+#define AB3100_EVENTA2_READY_TX (0x01<<8)
+#define AB3100_EVENTA2_READY_RX (0x02<<8)
+#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8)
+#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8)
+#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8)
+#define AB3100_EVENTA2_MIDR (0x20<<8)
+#define AB3100_EVENTA2_BATTERY_REM (0x40<<8)
+#define AB3100_EVENTA2_ALARM (0x80<<8)
+
+#define AB3100_EVENTA3_ADC_TRIG5 (0x01)
+#define AB3100_EVENTA3_ADC_TRIG4 (0x02)
+#define AB3100_EVENTA3_ADC_TRIG3 (0x04)
+#define AB3100_EVENTA3_ADC_TRIG2 (0x08)
+#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10)
+#define AB3100_EVENTA3_ADC_TRIGVTX (0x20)
+#define AB3100_EVENTA3_ADC_TRIG1 (0x40)
+#define AB3100_EVENTA3_ADC_TRIG0 (0x80)
+
+/* AB3100, STR register flags */
+#define AB3100_STR_ONSWA (0x01)
+#define AB3100_STR_ONSWB (0x02)
+#define AB3100_STR_ONSWC (0x04)
+#define AB3100_STR_DCIO (0x08)
+#define AB3100_STR_BOOT_MODE (0x10)
+#define AB3100_STR_SIM_OFF (0x20)
+#define AB3100_STR_BATT_REMOVAL (0x40)
+#define AB3100_STR_VBUS (0x80)
+
+/**
+ * struct ab3100
+ * @access_mutex: lock out concurrent accesses to the AB3100 registers
+ * @dev: pointer to the containing device
+ * @i2c_client: I2C client for this chip
+ * @testreg_client: secondary client for test registers
+ * @chip_name: name of this chip variant
+ * @chip_id: 8 bit chip ID for this chip variant
+ * @work: an event handling worker
+ * @event_subscribers: event subscribers are listed here
+ * @startup_events: a copy of the first reading of the event registers
+ * @startup_events_read: whether the first events have been read
+ *
+ * This struct is PRIVATE and devices using it should NOT
+ * access ANY fields. It is used as a token for calling the
+ * AB3100 functions.
+ */
+struct ab3100 {
+ struct mutex access_mutex;
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct i2c_client *testreg_client;
+ char chip_name[32];
+ u8 chip_id;
+ struct work_struct work;
+ struct blocking_notifier_head event_subscribers;
+ u32 startup_events;
+ bool startup_events_read;
+};
+
+int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval);
+int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval);
+int ab3100_get_register_page(struct ab3100 *ab3100,
+ u8 first_reg, u8 *regvals, u8 numregs);
+int ab3100_mask_and_set_register(struct ab3100 *ab3100,
+ u8 reg, u8 andmask, u8 ormask);
+u8 ab3100_get_chip_type(struct ab3100 *ab3100);
+int ab3100_event_register(struct ab3100 *ab3100,
+ struct notifier_block *nb);
+int ab3100_event_unregister(struct ab3100 *ab3100,
+ struct notifier_block *nb);
+int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
+ u32 *fatevent);
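Because event_subscribers is a blocking notifier head, subscribing amounts to a plain notifier_block registration; a minimal sketch, assuming the concatenated EVENTA1/A2/A3 word is delivered as the notifier value (names are illustrative):

/* Minimal sketch only: the event layout follows the AB3100_EVENTA* flags
 * above; that it arrives as the notifier value is an assumption here. */
static int foo_ab3100_event(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	if (event & AB3100_EVENTA1_VBUS)
		pr_info("foo: AB3100 VBUS event\n");
	return NOTIFY_OK;
}

static struct notifier_block foo_ab3100_nb = {
	.notifier_call = foo_ab3100_event,
};

/* In probe: ab3100_event_register(ab3100, &foo_ab3100_nb); */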
+
+#endif
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index 322cd6deb9f..de3c4ad19af 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -30,6 +30,13 @@ struct asic3_platform_data {
#define ASIC3_NUM_GPIOS 64
#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6
+#define ASIC3_IRQ_LED0 64
+#define ASIC3_IRQ_LED1 65
+#define ASIC3_IRQ_LED2 66
+#define ASIC3_IRQ_SPI 67
+#define ASIC3_IRQ_SMBUS 68
+#define ASIC3_IRQ_OWM 69
+
#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
#define ASIC3_GPIO_BANK_A 0
@@ -227,8 +234,8 @@ struct asic3_platform_data {
/* Basic control of the SD ASIC */
-#define ASIC3_SDHWCTRL_Base 0x0E00
-#define ASIC3_SDHWCTRL_SDConf 0x00
+#define ASIC3_SDHWCTRL_BASE 0x0E00
+#define ASIC3_SDHWCTRL_SDCONF 0x00
#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
@@ -242,10 +249,10 @@ struct asic3_platform_data {
/* SD card power supply ctrl 1=enable */
#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
-#define ASIC3_EXTCF_Base 0x1100
+#define ASIC3_EXTCF_BASE 0x1100
-#define ASIC3_EXTCF_Select 0x00
-#define ASIC3_EXTCF_Reset 0x04
+#define ASIC3_EXTCF_SELECT 0x00
+#define ASIC3_EXTCF_RESET 0x04
#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
@@ -279,222 +286,9 @@ struct asic3_platform_data {
* SDIO_CTRL Control registers for SDIO operations
*
*****************************************************************************/
-#define ASIC3_SD_CONFIG_Base 0x0400 /* Assumes 32 bit addressing */
-
-#define ASIC3_SD_CONFIG_Command 0x08 /* R/W: Command */
-
-/* [0:8] SD Control Register Base Address */
-#define ASIC3_SD_CONFIG_Addr0 0x20
-
-/* [9:31] SD Control Register Base Address */
-#define ASIC3_SD_CONFIG_Addr1 0x24
-
-/* R/O: interrupt assigned to pin */
-#define ASIC3_SD_CONFIG_IntPin 0x78
-
-/*
- * Set to 0x1f to clock SD controller, 0 otherwise.
- * At 0x82 - Gated Clock Ctrl
- */
-#define ASIC3_SD_CONFIG_ClkStop 0x80
-
-/* Control clock of SD controller */
-#define ASIC3_SD_CONFIG_ClockMode 0x84
-#define ASIC3_SD_CONFIG_SDHC_PinStatus 0x88 /* R/0: SD pins status */
-#define ASIC3_SD_CONFIG_SDHC_Power1 0x90 /* Power1 - manual pwr ctrl */
-
-/* auto power up after card inserted */
-#define ASIC3_SD_CONFIG_SDHC_Power2 0x92
-
-/* auto power down when card removed */
-#define ASIC3_SD_CONFIG_SDHC_Power3 0x94
-#define ASIC3_SD_CONFIG_SDHC_CardDetect 0x98
-#define ASIC3_SD_CONFIG_SDHC_Slot 0xA0 /* R/O: support slot number */
-#define ASIC3_SD_CONFIG_SDHC_ExtGateClk1 0x1E0 /* Not used */
-#define ASIC3_SD_CONFIG_SDHC_ExtGateClk2 0x1E2 /* Not used*/
-
-/* GPIO Output Reg. , at 0x1EA - GPIO Output Enable Reg. */
-#define ASIC3_SD_CONFIG_SDHC_GPIO_OutAndEnable 0x1E8
-#define ASIC3_SD_CONFIG_SDHC_GPIO_Status 0x1EC /* GPIO Status Reg. */
-
-/* Bit 1: double buffer/single buffer */
-#define ASIC3_SD_CONFIG_SDHC_ExtGateClk3 0x1F0
-
-/* Memory access enable (set to 1 to access SD Controller) */
-#define SD_CONFIG_COMMAND_MAE (1<<1)
-
-#define SD_CONFIG_CLK_ENABLE_ALL 0x1f
-
-#define SD_CONFIG_POWER1_PC_33V 0x0200 /* Set for 3.3 volts */
-#define SD_CONFIG_POWER1_PC_OFF 0x0000 /* Turn off power */
-
- /* two bits - number of cycles for card detection */
-#define SD_CONFIG_CARDDETECTMODE_CLK ((x) & 0x3)
-
-
-#define ASIC3_SD_CTRL_Base 0x1000
-
-#define ASIC3_SD_CTRL_Cmd 0x00
-#define ASIC3_SD_CTRL_Arg0 0x08
-#define ASIC3_SD_CTRL_Arg1 0x0C
-#define ASIC3_SD_CTRL_StopInternal 0x10
-#define ASIC3_SD_CTRL_TransferSectorCount 0x14
-#define ASIC3_SD_CTRL_Response0 0x18
-#define ASIC3_SD_CTRL_Response1 0x1C
-#define ASIC3_SD_CTRL_Response2 0x20
-#define ASIC3_SD_CTRL_Response3 0x24
-#define ASIC3_SD_CTRL_Response4 0x28
-#define ASIC3_SD_CTRL_Response5 0x2C
-#define ASIC3_SD_CTRL_Response6 0x30
-#define ASIC3_SD_CTRL_Response7 0x34
-#define ASIC3_SD_CTRL_CardStatus 0x38
-#define ASIC3_SD_CTRL_BufferCtrl 0x3C
-#define ASIC3_SD_CTRL_IntMaskCard 0x40
-#define ASIC3_SD_CTRL_IntMaskBuffer 0x44
-#define ASIC3_SD_CTRL_CardClockCtrl 0x48
-#define ASIC3_SD_CTRL_MemCardXferDataLen 0x4C
-#define ASIC3_SD_CTRL_MemCardOptionSetup 0x50
-#define ASIC3_SD_CTRL_ErrorStatus0 0x58
-#define ASIC3_SD_CTRL_ErrorStatus1 0x5C
-#define ASIC3_SD_CTRL_DataPort 0x60
-#define ASIC3_SD_CTRL_TransactionCtrl 0x68
-#define ASIC3_SD_CTRL_SoftwareReset 0x1C0
-
-#define SD_CTRL_SOFTWARE_RESET_CLEAR (1<<0)
-
-#define SD_CTRL_TRANSACTIONCONTROL_SET (1<<8)
-
-#define SD_CTRL_CARDCLOCKCONTROL_FOR_SD_CARD (1<<15)
-#define SD_CTRL_CARDCLOCKCONTROL_ENABLE_CLOCK (1<<8)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_512 (1<<7)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_256 (1<<6)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_128 (1<<5)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_64 (1<<4)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_32 (1<<3)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_16 (1<<2)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_8 (1<<1)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_4 (1<<0)
-#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_2 (0<<0)
-
-#define MEM_CARD_OPTION_REQUIRED 0x000e
-#define MEM_CARD_OPTION_DATA_RESPONSE_TIMEOUT(x) (((x) & 0x0f) << 4)
-#define MEM_CARD_OPTION_C2_MODULE_NOT_PRESENT (1<<14)
-#define MEM_CARD_OPTION_DATA_XFR_WIDTH_1 (1<<15)
-#define MEM_CARD_OPTION_DATA_XFR_WIDTH_4 0
-
-#define SD_CTRL_COMMAND_INDEX(x) ((x) & 0x3f)
-#define SD_CTRL_COMMAND_TYPE_CMD (0 << 6)
-#define SD_CTRL_COMMAND_TYPE_ACMD (1 << 6)
-#define SD_CTRL_COMMAND_TYPE_AUTHENTICATION (2 << 6)
-#define SD_CTRL_COMMAND_RESPONSE_TYPE_NORMAL (0 << 8)
-#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1 (4 << 8)
-#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1B (5 << 8)
-#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R2 (6 << 8)
-#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R3 (7 << 8)
-#define SD_CTRL_COMMAND_DATA_PRESENT (1 << 11)
-#define SD_CTRL_COMMAND_TRANSFER_READ (1 << 12)
-#define SD_CTRL_COMMAND_TRANSFER_WRITE (0 << 12)
-#define SD_CTRL_COMMAND_MULTI_BLOCK (1 << 13)
-#define SD_CTRL_COMMAND_SECURITY_CMD (1 << 14)
-
-#define SD_CTRL_STOP_INTERNAL_ISSSUE_CMD12 (1 << 0)
-#define SD_CTRL_STOP_INTERNAL_AUTO_ISSUE_CMD12 (1 << 8)
-
-#define SD_CTRL_CARDSTATUS_RESPONSE_END (1 << 0)
-#define SD_CTRL_CARDSTATUS_RW_END (1 << 2)
-#define SD_CTRL_CARDSTATUS_CARD_REMOVED_0 (1 << 3)
-#define SD_CTRL_CARDSTATUS_CARD_INSERTED_0 (1 << 4)
-#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_0 (1 << 5)
-#define SD_CTRL_CARDSTATUS_WRITE_PROTECT (1 << 7)
-#define SD_CTRL_CARDSTATUS_CARD_REMOVED_3 (1 << 8)
-#define SD_CTRL_CARDSTATUS_CARD_INSERTED_3 (1 << 9)
-#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_3 (1 << 10)
-
-#define SD_CTRL_BUFFERSTATUS_CMD_INDEX_ERROR (1 << 0)
-#define SD_CTRL_BUFFERSTATUS_CRC_ERROR (1 << 1)
-#define SD_CTRL_BUFFERSTATUS_STOP_BIT_END_ERROR (1 << 2)
-#define SD_CTRL_BUFFERSTATUS_DATA_TIMEOUT (1 << 3)
-#define SD_CTRL_BUFFERSTATUS_BUFFER_OVERFLOW (1 << 4)
-#define SD_CTRL_BUFFERSTATUS_BUFFER_UNDERFLOW (1 << 5)
-#define SD_CTRL_BUFFERSTATUS_CMD_TIMEOUT (1 << 6)
-#define SD_CTRL_BUFFERSTATUS_UNK7 (1 << 7)
-#define SD_CTRL_BUFFERSTATUS_BUFFER_READ_ENABLE (1 << 8)
-#define SD_CTRL_BUFFERSTATUS_BUFFER_WRITE_ENABLE (1 << 9)
-#define SD_CTRL_BUFFERSTATUS_ILLEGAL_FUNCTION (1 << 13)
-#define SD_CTRL_BUFFERSTATUS_CMD_BUSY (1 << 14)
-#define SD_CTRL_BUFFERSTATUS_ILLEGAL_ACCESS (1 << 15)
-
-#define SD_CTRL_INTMASKCARD_RESPONSE_END (1 << 0)
-#define SD_CTRL_INTMASKCARD_RW_END (1 << 2)
-#define SD_CTRL_INTMASKCARD_CARD_REMOVED_0 (1 << 3)
-#define SD_CTRL_INTMASKCARD_CARD_INSERTED_0 (1 << 4)
-#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_0 (1 << 5)
-#define SD_CTRL_INTMASKCARD_UNK6 (1 << 6)
-#define SD_CTRL_INTMASKCARD_WRITE_PROTECT (1 << 7)
-#define SD_CTRL_INTMASKCARD_CARD_REMOVED_3 (1 << 8)
-#define SD_CTRL_INTMASKCARD_CARD_INSERTED_3 (1 << 9)
-#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_3 (1 << 10)
-
-#define SD_CTRL_INTMASKBUFFER_CMD_INDEX_ERROR (1 << 0)
-#define SD_CTRL_INTMASKBUFFER_CRC_ERROR (1 << 1)
-#define SD_CTRL_INTMASKBUFFER_STOP_BIT_END_ERROR (1 << 2)
-#define SD_CTRL_INTMASKBUFFER_DATA_TIMEOUT (1 << 3)
-#define SD_CTRL_INTMASKBUFFER_BUFFER_OVERFLOW (1 << 4)
-#define SD_CTRL_INTMASKBUFFER_BUFFER_UNDERFLOW (1 << 5)
-#define SD_CTRL_INTMASKBUFFER_CMD_TIMEOUT (1 << 6)
-#define SD_CTRL_INTMASKBUFFER_UNK7 (1 << 7)
-#define SD_CTRL_INTMASKBUFFER_BUFFER_READ_ENABLE (1 << 8)
-#define SD_CTRL_INTMASKBUFFER_BUFFER_WRITE_ENABLE (1 << 9)
-#define SD_CTRL_INTMASKBUFFER_ILLEGAL_FUNCTION (1 << 13)
-#define SD_CTRL_INTMASKBUFFER_CMD_BUSY (1 << 14)
-#define SD_CTRL_INTMASKBUFFER_ILLEGAL_ACCESS (1 << 15)
-
-#define SD_CTRL_DETAIL0_RESPONSE_CMD_ERROR (1 << 0)
-#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 2)
-#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_CMD12 (1 << 3)
-#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_READ_DATA (1 << 4)
-#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_WRITE_CRC_STATUS (1 << 5)
-#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 8)
-#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_CMD12 (1 << 9)
-#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_READ_DATA (1 << 10)
-#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_WRITE_CMD (1 << 11)
-
-#define SD_CTRL_DETAIL1_NO_CMD_RESPONSE (1 << 0)
-#define SD_CTRL_DETAIL1_TIMEOUT_READ_DATA (1 << 4)
-#define SD_CTRL_DETAIL1_TIMEOUT_CRS_STATUS (1 << 5)
-#define SD_CTRL_DETAIL1_TIMEOUT_CRC_BUSY (1 << 6)
-
-#define ASIC3_SDIO_CTRL_Base 0x1200
-
-#define ASIC3_SDIO_CTRL_Cmd 0x00
-#define ASIC3_SDIO_CTRL_CardPortSel 0x04
-#define ASIC3_SDIO_CTRL_Arg0 0x08
-#define ASIC3_SDIO_CTRL_Arg1 0x0C
-#define ASIC3_SDIO_CTRL_TransferBlockCount 0x14
-#define ASIC3_SDIO_CTRL_Response0 0x18
-#define ASIC3_SDIO_CTRL_Response1 0x1C
-#define ASIC3_SDIO_CTRL_Response2 0x20
-#define ASIC3_SDIO_CTRL_Response3 0x24
-#define ASIC3_SDIO_CTRL_Response4 0x28
-#define ASIC3_SDIO_CTRL_Response5 0x2C
-#define ASIC3_SDIO_CTRL_Response6 0x30
-#define ASIC3_SDIO_CTRL_Response7 0x34
-#define ASIC3_SDIO_CTRL_CardStatus 0x38
-#define ASIC3_SDIO_CTRL_BufferCtrl 0x3C
-#define ASIC3_SDIO_CTRL_IntMaskCard 0x40
-#define ASIC3_SDIO_CTRL_IntMaskBuffer 0x44
-#define ASIC3_SDIO_CTRL_CardXferDataLen 0x4C
-#define ASIC3_SDIO_CTRL_CardOptionSetup 0x50
-#define ASIC3_SDIO_CTRL_ErrorStatus0 0x54
-#define ASIC3_SDIO_CTRL_ErrorStatus1 0x58
-#define ASIC3_SDIO_CTRL_DataPort 0x60
-#define ASIC3_SDIO_CTRL_TransactionCtrl 0x68
-#define ASIC3_SDIO_CTRL_CardIntCtrl 0x6C
-#define ASIC3_SDIO_CTRL_ClocknWaitCtrl 0x70
-#define ASIC3_SDIO_CTRL_HostInformation 0x74
-#define ASIC3_SDIO_CTRL_ErrorCtrl 0x78
-#define ASIC3_SDIO_CTRL_LEDCtrl 0x7C
-#define ASIC3_SDIO_CTRL_SoftwareReset 0x1C0
+#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
+#define ASIC3_SD_CTRL_BASE 0x1000
+#define ASIC3_SDIO_CTRL_BASE 0x1200
#define ASIC3_MAP_SIZE_32BIT 0x2000
#define ASIC3_MAP_SIZE_16BIT 0x1000
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
new file mode 100644
index 00000000000..be469a357cb
--- /dev/null
+++ b/include/linux/mfd/ds1wm.h
@@ -0,0 +1,6 @@
+/* MFD cell driver data for the DS1WM driver */
+
+struct ds1wm_driver_data {
+ int active_high;
+ int clock_rate;
+};
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
new file mode 100644
index 00000000000..c12c3c0932b
--- /dev/null
+++ b/include/linux/mfd/ezx-pcap.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com>
+ *
+ * For further information, please see http://wiki.openezx.org/PCAP2
+ */
+
+#ifndef EZX_PCAP_H
+#define EZX_PCAP_H
+
+struct pcap_subdev {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct pcap_platform_data {
+ unsigned int irq_base;
+ unsigned int config;
+ void (*init) (void *); /* board specific init */
+ int num_subdevs;
+ struct pcap_subdev *subdevs;
+};
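Board files describe the chip to the core driver through this platform data; a minimal sketch (irq_base, subdevice names and ids are illustrative only):

/* Minimal sketch of board-level PCAP2 platform data; all values are
 * illustrative only. */
static struct pcap_subdev foo_pcap_subdevs[] = {
	{ .id = 0, .name = "pcap-ts" },
	{ .id = 1, .name = "pcap-rtc" },
};

static struct pcap_platform_data foo_pcap_pdata = {
	.irq_base	= 100,
	.config		= 0,	/* or PCAP_SECOND_PORT / PCAP_CS_AH, see below */
	.num_subdevs	= ARRAY_SIZE(foo_pcap_subdevs),
	.subdevs	= foo_pcap_subdevs,
};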
+
+struct pcap_chip;
+
+int ezx_pcap_write(struct pcap_chip *, u8, u32);
+int ezx_pcap_read(struct pcap_chip *, u8, u32 *);
+int pcap_to_irq(struct pcap_chip *, int);
+int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *);
+int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]);
+
+#define PCAP_SECOND_PORT 1
+#define PCAP_CS_AH 2
+
+#define PCAP_REGISTER_WRITE_OP_BIT 0x80000000
+#define PCAP_REGISTER_READ_OP_BIT 0x00000000
+
+#define PCAP_REGISTER_VALUE_MASK 0x01ffffff
+#define PCAP_REGISTER_ADDRESS_MASK 0x7c000000
+#define PCAP_REGISTER_ADDRESS_SHIFT 26
+#define PCAP_REGISTER_NUMBER 32
+#define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff
+#define PCAP_MASK_ALL_INTERRUPT 0x01ffffff
+
+/* registers accessible by both pcap ports */
+#define PCAP_REG_ISR 0x0 /* Interrupt Status */
+#define PCAP_REG_MSR 0x1 /* Interrupt Mask */
+#define PCAP_REG_PSTAT 0x2 /* Processor Status */
+#define PCAP_REG_VREG2 0x6 /* Regulator Bank 2 Control */
+#define PCAP_REG_AUXVREG 0x7 /* Auxiliary Regulator Control */
+#define PCAP_REG_BATT 0x8 /* Battery Control */
+#define PCAP_REG_ADC 0x9 /* AD Control */
+#define PCAP_REG_ADR 0xa /* AD Result */
+#define PCAP_REG_CODEC 0xb /* Audio Codec Control */
+#define PCAP_REG_RX_AMPS 0xc /* RX Audio Amplifiers Control */
+#define PCAP_REG_ST_DAC 0xd /* Stereo DAC Control */
+#define PCAP_REG_BUSCTRL 0x14 /* Connectivity Control */
+#define PCAP_REG_PERIPH 0x15 /* Peripheral Control */
+#define PCAP_REG_LOWPWR 0x18 /* Regulator Low Power Control */
+#define PCAP_REG_TX_AMPS 0x1a /* TX Audio Amplifiers Control */
+#define PCAP_REG_GP 0x1b /* General Purpose */
+#define PCAP_REG_TEST1 0x1c
+#define PCAP_REG_TEST2 0x1d
+#define PCAP_REG_VENDOR_TEST1 0x1e
+#define PCAP_REG_VENDOR_TEST2 0x1f
+
+/* registers accessible by pcap port 1 only (a1200, e2 & e6) */
+#define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */
+#define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */
+#define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */
+#define PCAP_REG_RTC_TOD 0xe /* RTC Time of Day */
+#define PCAP_REG_RTC_TODA 0xf /* RTC Time of Day Alarm */
+#define PCAP_REG_RTC_DAY 0x10 /* RTC Day */
+#define PCAP_REG_RTC_DAYA 0x11 /* RTC Day Alarm */
+#define PCAP_REG_MTRTMR 0x12 /* AD Monitor Timer */
+#define PCAP_REG_PWR 0x13 /* Power Control */
+#define PCAP_REG_AUXVREG_MASK 0x16 /* Auxiliary Regulator Mask */
+#define PCAP_REG_VENDOR_REV 0x17
+#define PCAP_REG_PERIPH_MASK 0x19 /* Peripheral Mask */
+
+/* PCAP2 Interrupts */
+#define PCAP_NIRQS 23
+#define PCAP_IRQ_ADCDONE 0 /* ADC done port 1 */
+#define PCAP_IRQ_TS 1 /* Touch Screen */
+#define PCAP_IRQ_1HZ 2 /* 1HZ timer */
+#define PCAP_IRQ_WH 3 /* ADC above high limit */
+#define PCAP_IRQ_WL 4 /* ADC below low limit */
+#define PCAP_IRQ_TODA 5 /* Time of day alarm */
+#define PCAP_IRQ_USB4V 6 /* USB above 4V */
+#define PCAP_IRQ_ONOFF 7 /* On/Off button */
+#define PCAP_IRQ_ONOFF2 8 /* On/Off button 2 */
+#define PCAP_IRQ_USB1V 9 /* USB above 1V */
+#define PCAP_IRQ_MOBPORT 10
+#define PCAP_IRQ_MIC 11 /* Mic attach/HS button */
+#define PCAP_IRQ_HS 12 /* Headset attach */
+#define PCAP_IRQ_ST 13
+#define PCAP_IRQ_PC 14 /* Power Cut */
+#define PCAP_IRQ_WARM 15
+#define PCAP_IRQ_EOL 16 /* Battery End Of Life */
+#define PCAP_IRQ_CLK 17
+#define PCAP_IRQ_SYSRST 18 /* System Reset */
+#define PCAP_IRQ_DUMMY 19
+#define PCAP_IRQ_ADCDONE2 20 /* ADC done port 2 */
+#define PCAP_IRQ_SOFTRESET 21
+#define PCAP_IRQ_MNEXB 22
+
+/* voltage regulators */
+#define V1 0
+#define V2 1
+#define V3 2
+#define V4 3
+#define V5 4
+#define V6 5
+#define V7 6
+#define V8 7
+#define V9 8
+#define V10 9
+#define VAUX1 10
+#define VAUX2 11
+#define VAUX3 12
+#define VAUX4 13
+#define VSIM 14
+#define VSIM2 15
+#define VVIB 16
+#define SW1 17
+#define SW2 18
+#define SW3 19
+#define SW1S 20
+#define SW2S 21
+
+#define PCAP_BATT_DAC_MASK 0x000000ff
+#define PCAP_BATT_DAC_SHIFT 0
+#define PCAP_BATT_B_FDBK (1 << 8)
+#define PCAP_BATT_EXT_ISENSE (1 << 9)
+#define PCAP_BATT_V_COIN_MASK 0x00003c00
+#define PCAP_BATT_V_COIN_SHIFT 10
+#define PCAP_BATT_I_COIN (1 << 14)
+#define PCAP_BATT_COIN_CH_EN (1 << 15)
+#define PCAP_BATT_EOL_SEL_MASK 0x000e0000
+#define PCAP_BATT_EOL_SEL_SHIFT 17
+#define PCAP_BATT_EOL_CMP_EN (1 << 20)
+#define PCAP_BATT_BATT_DET_EN (1 << 21)
+#define PCAP_BATT_THERMBIAS_CTRL (1 << 22)
+
+#define PCAP_ADC_ADEN (1 << 0)
+#define PCAP_ADC_RAND (1 << 1)
+#define PCAP_ADC_AD_SEL1 (1 << 2)
+#define PCAP_ADC_AD_SEL2 (1 << 3)
+#define PCAP_ADC_ADA1_MASK 0x00000070
+#define PCAP_ADC_ADA1_SHIFT 4
+#define PCAP_ADC_ADA2_MASK 0x00000380
+#define PCAP_ADC_ADA2_SHIFT 7
+#define PCAP_ADC_ATO_MASK 0x00003c00
+#define PCAP_ADC_ATO_SHIFT 10
+#define PCAP_ADC_ATOX (1 << 14)
+#define PCAP_ADC_MTR1 (1 << 15)
+#define PCAP_ADC_MTR2 (1 << 16)
+#define PCAP_ADC_TS_M_MASK 0x000e0000
+#define PCAP_ADC_TS_M_SHIFT 17
+#define PCAP_ADC_TS_REF_LOWPWR (1 << 20)
+#define PCAP_ADC_TS_REFENB (1 << 21)
+#define PCAP_ADC_BATT_I_POLARITY (1 << 22)
+#define PCAP_ADC_BATT_I_ADC (1 << 23)
+
+#define PCAP_ADC_BANK_0 0
+#define PCAP_ADC_BANK_1 1
+/* ADC bank 0 */
+#define PCAP_ADC_CH_COIN 0
+#define PCAP_ADC_CH_BATT 1
+#define PCAP_ADC_CH_BPLUS 2
+#define PCAP_ADC_CH_MOBPORTB 3
+#define PCAP_ADC_CH_TEMPERATURE 4
+#define PCAP_ADC_CH_CHARGER_ID 5
+#define PCAP_ADC_CH_AD6 6
+/* ADC bank 1 */
+#define PCAP_ADC_CH_AD7 0
+#define PCAP_ADC_CH_AD8 1
+#define PCAP_ADC_CH_AD9 2
+#define PCAP_ADC_CH_TS_X1 3
+#define PCAP_ADC_CH_TS_X2 4
+#define PCAP_ADC_CH_TS_Y1 5
+#define PCAP_ADC_CH_TS_Y2 6
+
+#define PCAP_ADC_T_NOW 0
+#define PCAP_ADC_T_IN_BURST 1
+#define PCAP_ADC_T_OUT_BURST 2
+
+#define PCAP_ADC_ATO_IN_BURST 6
+#define PCAP_ADC_ATO_OUT_BURST 0
+
+#define PCAP_ADC_TS_M_XY 1
+#define PCAP_ADC_TS_M_PRESSURE 2
+#define PCAP_ADC_TS_M_PLATE_X 3
+#define PCAP_ADC_TS_M_PLATE_Y 4
+#define PCAP_ADC_TS_M_STANDBY 5
+#define PCAP_ADC_TS_M_NONTS 6
+
+#define PCAP_ADR_ADD1_MASK 0x000003ff
+#define PCAP_ADR_ADD1_SHIFT 0
+#define PCAP_ADR_ADD2_MASK 0x000ffc00
+#define PCAP_ADR_ADD2_SHIFT 10
+#define PCAP_ADR_ADINC1 (1 << 20)
+#define PCAP_ADR_ADINC2 (1 << 21)
+#define PCAP_ADR_ASC (1 << 22)
+#define PCAP_ADR_ONESHOT (1 << 23)
+
+#define PCAP_BUSCTRL_FSENB (1 << 0)
+#define PCAP_BUSCTRL_USB_SUSPEND (1 << 1)
+#define PCAP_BUSCTRL_USB_PU (1 << 2)
+#define PCAP_BUSCTRL_USB_PD (1 << 3)
+#define PCAP_BUSCTRL_VUSB_EN (1 << 4)
+#define PCAP_BUSCTRL_USB_PS (1 << 5)
+#define PCAP_BUSCTRL_VUSB_MSTR_EN (1 << 6)
+#define PCAP_BUSCTRL_VBUS_PD_ENB (1 << 7)
+#define PCAP_BUSCTRL_CURRLIM (1 << 8)
+#define PCAP_BUSCTRL_RS232ENB (1 << 9)
+#define PCAP_BUSCTRL_RS232_DIR (1 << 10)
+#define PCAP_BUSCTRL_SE0_CONN (1 << 11)
+#define PCAP_BUSCTRL_USB_PDM (1 << 12)
+#define PCAP_BUSCTRL_BUS_PRI_ADJ (1 << 24)
+
+/* leds */
+#define PCAP_LED0 0
+#define PCAP_LED1 1
+#define PCAP_BL0 2
+#define PCAP_BL1 3
+#define PCAP_VIB 4
+#define PCAP_LED_3MA 0
+#define PCAP_LED_4MA 1
+#define PCAP_LED_5MA 2
+#define PCAP_LED_9MA 3
+#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff
+#define PCAP_LED_GPIO_EN 0x01000000
+#define PCAP_LED_GPIO_INVERT 0x02000000
+#define PCAP_LED_T_MASK 0xf
+#define PCAP_LED_C_MASK 0x3
+#define PCAP_BL_MASK 0x1f
+#define PCAP_BL0_SHIFT 0
+#define PCAP_LED0_EN (1 << 5)
+#define PCAP_LED1_EN (1 << 6)
+#define PCAP_LED0_T_SHIFT 7
+#define PCAP_LED1_T_SHIFT 11
+#define PCAP_LED0_C_SHIFT 15
+#define PCAP_LED1_C_SHIFT 17
+#define PCAP_BL1_SHIFT 20
+#define PCAP_VIB_MASK 0x3
+#define PCAP_VIB_SHIFT 20
+#define PCAP_VIB_EN (1 << 19)
+
+/* RTC */
+#define PCAP_RTC_DAY_MASK 0x3fff
+#define PCAP_RTC_TOD_MASK 0xffff
+#define PCAP_RTC_PC_MASK 0x7
+#define SEC_PER_DAY 86400
+
+#endif
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
index b4294f12c4f..3d3ed67bd96 100644
--- a/include/linux/mfd/htc-pasic3.h
+++ b/include/linux/mfd/htc-pasic3.h
@@ -48,7 +48,6 @@ struct pasic3_leds_machinfo {
struct pasic3_platform_data {
struct pasic3_leds_machinfo *led_pdata;
- unsigned int bus_shift;
unsigned int clock_rate;
};
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 4455b212d75..c8f51c3c0a7 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,6 +29,8 @@ struct pcf50633_platform_data {
char **batteries;
int num_batteries;
+ int charging_restart_interval;
+
/* Callbacks */
void (*probe_done)(struct pcf50633 *);
void (*mbc_event_callback)(struct pcf50633 *, int);
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 6e17619b773..4119579acf2 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,7 +128,6 @@ enum pcf50633_reg_mbcs3 {
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
int pcf50633_mbc_get_status(struct pcf50633 *);
-void pcf50633_mbc_set_status(struct pcf50633 *, int what, int status);
#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 516d955ab8a..6b9c5d06690 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -19,6 +19,13 @@
} while (0)
/*
+ * data for the MMC controller
+ */
+struct tmio_mmc_data {
+ const unsigned int hclk;
+};
+
+/*
* data for the NAND controller
*/
struct tmio_nand_data {
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index af95a1d2f3a..d899dc0223b 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -490,6 +490,7 @@
/*
* R231 (0xE7) - Jack Status
*/
+#define WM8350_JACK_L_LVL 0x0800
#define WM8350_JACK_R_LVL 0x0400
/*
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 980669d50dc..42cca672f34 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -640,9 +640,11 @@ struct wm8350 {
*
* @init: Function called during driver initialisation. Should be
* used by the platform to configure GPIO functions and similar.
+ * @irq_high: Set if WM8350 IRQ is active high.
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
+ int irq_high;
};
diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h
index b6640e01804..e06ed3eb1d0 100644
--- a/include/linux/mfd/wm8400-audio.h
+++ b/include/linux/mfd/wm8400-audio.h
@@ -1181,6 +1181,7 @@
#define WM8400_FLL_OUTDIV_SHIFT 0 /* FLL_OUTDIV - [2:0] */
#define WM8400_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [2:0] */
+struct wm8400;
void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400);
#endif
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
new file mode 100644
index 00000000000..e11f4d9f1c2
--- /dev/null
+++ b/include/linux/mg_disk.h
@@ -0,0 +1,45 @@
+/*
+ * include/linux/mg_disk.h
+ *
+ * Private data for mflash platform driver
+ *
+ * (c) 2008 mGine Co.,LTD
+ * (c) 2008 unsik Kim <donari75@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MG_DISK_H__
+#define __MG_DISK_H__
+
+/* name for platform device */
+#define MG_DEV_NAME "mg_disk"
+
+/* names of GPIO resource */
+#define MG_RST_PIN "mg_rst"
+/* except for MG_BOOT_DEV, the reset-out pin should be assigned */
+#define MG_RSTOUT_PIN "mg_rstout"
+
+/* device attribution */
+/* use mflash as boot device */
+#define MG_BOOT_DEV (1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV (1 << 1)
+/* same as MG_STORAGE_DEV, but the bootloader has already done the reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
+
+/* private driver data */
+struct mg_drv_data {
+ /* disk resource */
+ u32 use_polling;
+
+ /* device attribution */
+ u32 dev_attr;
+
+ /* internally used */
+ void *host;
+};
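Platform code fills in this structure and attaches it to the mg_disk platform device; a minimal sketch:

/* Minimal sketch: mflash used as a storage device with polled I/O. */
static struct mg_drv_data foo_mg_drv_data = {
	.use_polling	= 1,
	.dev_attr	= MG_STORAGE_DEV,
};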
+
+#endif
diff --git a/include/linux/mii.h b/include/linux/mii.h
index ad748588faf..359fba88027 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -240,6 +240,22 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
}
/**
+ * mii_advertise_flowctrl - get flow control advertisement flags
+ * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
+ */
+static inline u16 mii_advertise_flowctrl(int cap)
+{
+ u16 adv = 0;
+
+ if (cap & FLOW_CTRL_RX)
+ adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (cap & FLOW_CTRL_TX)
+ adv ^= ADVERTISE_PAUSE_ASYM;
+
+ return adv;
+}
+
+/**
* mii_resolve_flowctrl_fdx
* @lcladv: value of MII ADVERTISE register
* @rmtadv: value of MII LPA register
@@ -250,18 +266,12 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
- if (lcladv & ADVERTISE_PAUSE_CAP) {
- if (lcladv & ADVERTISE_PAUSE_ASYM) {
- if (rmtadv & LPA_PAUSE_CAP)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- else if (rmtadv & LPA_PAUSE_ASYM)
- cap = FLOW_CTRL_RX;
- } else {
- if (rmtadv & LPA_PAUSE_CAP)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- }
- } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
- if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
+ if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
+ if (lcladv & ADVERTISE_PAUSE_CAP)
+ cap = FLOW_CTRL_RX;
+ else if (rmtadv & ADVERTISE_PAUSE_CAP)
cap = FLOW_CTRL_TX;
}
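The new helper pairs naturally with mii_resolve_flowctrl_fdx(): a MAC driver advertises its pause capabilities before autonegotiation and resolves the outcome afterwards. A minimal sketch (the register plumbing is illustrative):

/* Minimal sketch: advertise RX+TX pause, then resolve against the link
 * partner's LPA register once autonegotiation has completed. */
static u8 foo_resolve_pause(struct mii_if_info *mii)
{
	u16 lcladv = mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX);
	u16 rmtadv = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);

	return mii_resolve_flowctrl_fdx(lcladv, rmtadv);
}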
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index a820f816a49..05211774462 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -26,6 +26,7 @@
#define TUN_MINOR 200
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
+#define MPT2SAS_MINOR 221
#define HPET_MINOR 228
#define FUSE_MINOR 229
#define KVM_MINOR 232
@@ -40,6 +41,7 @@ struct miscdevice {
struct list_head list;
struct device *parent;
struct device *this_device;
+ const char *devnode;
};
extern int misc_register(struct miscdevice * misc);
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cf9c679ab38..0f82293a82e 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -55,6 +55,7 @@ enum {
MLX4_CMD_CLOSE_PORT = 0xa,
MLX4_CMD_QUERY_HCA = 0xb,
MLX4_CMD_QUERY_PORT = 0x43,
+ MLX4_CMD_SENSE_PORT = 0x4d,
MLX4_CMD_SET_PORT = 0xc,
MLX4_CMD_ACCESS_DDR = 0x2e,
MLX4_CMD_MAP_ICM = 0xffa,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8f659cc2996..ce7cc6c7bcb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -155,8 +155,9 @@ enum mlx4_qp_region {
};
enum mlx4_port_type {
- MLX4_PORT_TYPE_IB = 1 << 0,
- MLX4_PORT_TYPE_ETH = 1 << 1,
+ MLX4_PORT_TYPE_IB = 1,
+ MLX4_PORT_TYPE_ETH = 2,
+ MLX4_PORT_TYPE_AUTO = 3
};
enum mlx4_special_vlan_idx {
@@ -209,6 +210,7 @@ struct mlx4_caps {
int num_comp_vectors;
int num_mpts;
int num_mtt_segs;
+ int mtts_per_seg;
int fmr_reserved_mtts;
int reserved_mtts;
int reserved_mrws;
@@ -237,6 +239,7 @@ struct mlx4_caps {
enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
u8 supported_type[MLX4_MAX_PORTS + 1];
u32 port_mask;
+ enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
};
struct mlx4_buf_list {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index bf8f11982da..9f29d86e5dc 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -165,6 +165,7 @@ enum {
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
};
struct mlx4_wqe_ctrl_seg {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8c09f..d006e93d5c9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -7,7 +7,6 @@
#include <linux/gfp.h>
#include <linux/list.h>
-#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
@@ -19,6 +18,7 @@ struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
+struct rlimit;
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -134,6 +135,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
+#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
/*
* This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -145,7 +147,7 @@ extern pgprot_t protection_map[16];
*/
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
- return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+ return (vma->vm_flags & VM_PFN_AT_MMAP);
}
static inline int is_pfn_mapping(struct vm_area_struct *vma)
@@ -186,7 +188,7 @@ struct vm_operations_struct {
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@@ -578,12 +580,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*/
static inline unsigned long round_hint_to_min(unsigned long hint)
{
-#ifdef CONFIG_SECURITY
hint &= PAGE_MASK;
if (((void *)hint != NULL) &&
(hint < mmap_min_addr))
return PAGE_ALIGN(mmap_min_addr);
-#endif
return hint;
}
@@ -724,7 +724,7 @@ static inline int shmem_lock(struct file *file, int lock,
return 0;
}
#endif
-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);
@@ -792,6 +792,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -808,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access);
+ unsigned long address, unsigned int flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
- int write_access)
+ unsigned int flags)
{
/* should never happen if there's no MMU */
BUG();
@@ -823,8 +825,11 @@ static inline int handle_mm_fault(struct mm_struct *mm,
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
- int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
@@ -833,6 +838,7 @@ int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
@@ -848,17 +854,10 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
unsigned long end, unsigned long newflags);
/*
- * get_user_pages_fast provides equivalent functionality to get_user_pages,
- * operating on current and current->mm (force=0 and doesn't return any vmas).
- *
- * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
- * can be made about locking. get_user_pages_fast is to be implemented in a
- * way that is advantageous (vs get_user_pages()) when the user memory area is
- * already faulted in and present in ptes. However if the pages have to be
- * faulted in, it may turn out to be slightly slower).
+ * doesn't attempt to fault and will return short.
*/
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
/*
* A callback you can register to apply pressure to ageable caches.
@@ -1028,8 +1027,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
-extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
extern void remove_all_active_ranges(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
@@ -1061,7 +1058,8 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn);
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
unsigned long, enum memmap_context);
-extern void setup_per_zone_pages_min(void);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(void);
@@ -1076,7 +1074,7 @@ static inline void setup_per_cpu_pageset(void) {}
#endif
/* nommu.c */
-extern atomic_t mmap_pages_allocated;
+extern atomic_long_t mmap_pages_allocated;
/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1178,8 +1176,6 @@ void task_dirty_inc(struct task_struct *tsk);
#define VM_MAX_READAHEAD 128 /* kbytes */
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
pgoff_t offset, unsigned long nr_to_read);
@@ -1197,6 +1193,9 @@ void page_cache_async_readahead(struct address_space *mapping,
unsigned long size);
unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+ struct address_space *mapping,
+ struct file *filp);
/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
@@ -1316,8 +1315,8 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);
-extern void *alloc_locked_buffer(size_t size);
-extern void free_locked_buffer(void *buffer, size_t size);
-extern void release_locked_buffer(void *buffer, size_t size);
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+ size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d84feb7bdbf..7acc8439d9b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,6 +11,7 @@
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
+#include <linux/page-debug-flags.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -94,6 +95,17 @@ struct page {
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
+ unsigned long debug_flags; /* Use atomic bitops on this */
+#endif
+
+#ifdef CONFIG_KMEMCHECK
+ /*
+ * kmemcheck wants to track the status of each byte in a page; this
+ * is a pointer to such a status block. NULL if not tracked.
+ */
+ void *shadow;
+#endif
};
/*
@@ -228,6 +240,8 @@ struct mm_struct {
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ s8 oom_adj; /* OOM kill score adjustment (bit shift) */
+
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 30d1073bac3..9872d6ca58a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -12,21 +12,18 @@
#ifdef __KERNEL__
#include <linux/mm.h>
+#include <linux/percpu_counter.h>
#include <asm/atomic.h>
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
-extern atomic_long_t vm_committed_space;
+extern struct percpu_counter vm_committed_as;
-#ifdef CONFIG_SMP
-extern void vm_acct_memory(long pages);
-#else
static inline void vm_acct_memory(long pages)
{
- atomic_long_add(pages, &vm_committed_space);
+ percpu_counter_add(&vm_committed_as, pages);
}
-#endif
static inline void vm_unacct_memory(long pages)
{
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4e457256bd3..3e7615e9087 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
wake_up_process(host->sdio_irq_thread);
}
+struct regulator;
+
+int mmc_regulator_get_ocrmask(struct regulator *supply);
+int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
+
#endif
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index ea1bf5ba092..39751c8cde9 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -25,5 +25,15 @@
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
+#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
+#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
+
+#define SDIO_VENDOR_ID_SIANO 0x039a
+#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
+#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
+#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300
+#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301
+#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
+#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
#endif
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 139d7c88d9c..97491f78b08 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -1,5 +1,5 @@
-#ifndef MMIOTRACE_H
-#define MMIOTRACE_H
+#ifndef _LINUX_MMIOTRACE_H
+#define _LINUX_MMIOTRACE_H
#include <linux/types.h>
#include <linux/list.h>
@@ -13,28 +13,36 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
unsigned long condition, struct pt_regs *);
struct kmmio_probe {
- struct list_head list; /* kmmio internal list */
- unsigned long addr; /* start location of the probe point */
- unsigned long len; /* length of the probe region */
- kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
- kmmio_post_handler_t post_handler; /* Called after addr is executed */
- void *private;
+ /* kmmio internal list: */
+ struct list_head list;
+ /* start location of the probe point: */
+ unsigned long addr;
+ /* length of the probe region: */
+ unsigned long len;
+ /* Called before addr is executed: */
+ kmmio_pre_handler_t pre_handler;
+ /* Called after addr is executed: */
+ kmmio_post_handler_t post_handler;
+ void *private;
};
+extern unsigned int kmmio_count;
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+extern int kmmio_init(void);
+extern void kmmio_cleanup(void);
+
+#ifdef CONFIG_MMIOTRACE
/* kmmio is active by some kmmio_probes? */
static inline int is_kmmio_active(void)
{
- extern unsigned int kmmio_count;
return kmmio_count;
}
-extern int register_kmmio_probe(struct kmmio_probe *p);
-extern void unregister_kmmio_probe(struct kmmio_probe *p);
-
/* Called from page fault handler. */
extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
-#ifdef CONFIG_MMIOTRACE
/* Called from ioremap.c */
extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
void __iomem *addr);
@@ -43,7 +51,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr);
/* For anyone to insert markers. Remember trailing newline. */
extern int mmiotrace_printk(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
-#else
+#else /* !CONFIG_MMIOTRACE: */
+static inline int is_kmmio_active(void)
+{
+ return 0;
+}
+
+static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+{
+ return 0;
+}
+
static inline void mmiotrace_ioremap(resource_size_t offset,
unsigned long size, void __iomem *addr)
{
@@ -63,28 +81,28 @@ static inline int mmiotrace_printk(const char *fmt, ...)
#endif /* CONFIG_MMIOTRACE */
enum mm_io_opcode {
- MMIO_READ = 0x1, /* struct mmiotrace_rw */
- MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
- MMIO_PROBE = 0x3, /* struct mmiotrace_map */
- MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
- MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
+ MMIO_READ = 0x1, /* struct mmiotrace_rw */
+ MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
+ MMIO_PROBE = 0x3, /* struct mmiotrace_map */
+ MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
+ MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
};
struct mmiotrace_rw {
- resource_size_t phys; /* PCI address of register */
- unsigned long value;
- unsigned long pc; /* optional program counter */
- int map_id;
- unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
- unsigned char width; /* size of register access in bytes */
+ resource_size_t phys; /* PCI address of register */
+ unsigned long value;
+ unsigned long pc; /* optional program counter */
+ int map_id;
+ unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
+ unsigned char width; /* size of register access in bytes */
};
struct mmiotrace_map {
- resource_size_t phys; /* base address in PCI space */
- unsigned long virt; /* base virtual address */
- unsigned long len; /* mapping size */
- int map_id;
- unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
+ resource_size_t phys; /* base address in PCI space */
+ unsigned long virt; /* base virtual address */
+ unsigned long len; /* mapping size */
+ int map_id;
+ unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
};
/* in kernel/trace/trace_mmiotrace.c */
@@ -94,4 +112,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);
extern int mmio_trace_printk(const char *fmt, va_list args);
-#endif /* MMIOTRACE_H */
+#endif /* _LINUX_MMIOTRACE_H */
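
As the comment above notes, mmiotrace_printk() is the hook "for anyone to insert markers"; kmmio probe registration itself is normally driven by the tracer. A minimal sketch of a driver dropping markers around an MMIO access (the example_ function, register offset and value are invented, not part of the patch):

#include <linux/io.h>
#include <linux/mmiotrace.h>

static void example_poke_config(void __iomem *regs)
{
	/* Markers are interleaved with the traced MMIO accesses. */
	mmiotrace_printk("example: writing CONFIG register\n");
	iowrite32(0x1, regs + 0x10);
	mmiotrace_printk("example: CONFIG register written\n");
}
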
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1aca6cebbb7..88959853737 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -50,9 +50,6 @@ extern int page_group_by_mobility_disabled;
static inline int get_pageblock_migratetype(struct page *page)
{
- if (unlikely(page_group_by_mobility_disabled))
- return MIGRATE_UNMOVABLE;
-
return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
@@ -86,13 +83,8 @@ enum zone_stat_item {
NR_ACTIVE_ANON, /* " " " " " */
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
-#ifdef CONFIG_UNEVICTABLE_LRU
NR_UNEVICTABLE, /* " " " " " */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
-#else
- NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
- NR_MLOCK = NR_ACTIVE_FILE,
-#endif
NR_ANON_PAGES, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
only modified from process context */
@@ -135,11 +127,7 @@ enum lru_list {
LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-#ifdef CONFIG_UNEVICTABLE_LRU
LRU_UNEVICTABLE,
-#else
- LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
-#endif
NR_LRU_LISTS
};
@@ -159,13 +147,20 @@ static inline int is_active_lru(enum lru_list l)
static inline int is_unevictable_lru(enum lru_list l)
{
-#ifdef CONFIG_UNEVICTABLE_LRU
return (l == LRU_UNEVICTABLE);
-#else
- return 0;
-#endif
}
+enum zone_watermarks {
+ WMARK_MIN,
+ WMARK_LOW,
+ WMARK_HIGH,
+ NR_WMARK
+};
+
+#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+
struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
@@ -278,7 +273,10 @@ struct zone_reclaim_stat {
struct zone {
/* Fields commonly accessed by the page allocator */
- unsigned long pages_min, pages_low, pages_high;
+
+ /* zone watermarks, access with *_wmark_pages(zone) macros */
+ unsigned long watermark[NR_WMARK];
+
/*
* We don't know if the memory that we're going to allocate will be freeable
* or/and it will be released eventually, so to avoid totally wasting several
@@ -323,9 +321,9 @@ struct zone {
/* Fields commonly accessed by the page reclaim scanner */
spinlock_t lru_lock;
- struct {
+ struct zone_lru {
struct list_head list;
- unsigned long nr_scan;
+ unsigned long nr_saved_scan; /* accumulated for batching */
} lru[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
@@ -764,12 +762,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int,
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
-#include <linux/topology.h>
-/* Returns the number of the current Node. */
-#ifndef numa_node_id
-#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
-#endif
-
#ifndef CONFIG_NEED_MULTIPLE_NODES
extern struct pglist_data contig_page_data;
@@ -806,6 +798,14 @@ extern struct zone *next_zone(struct zone *zone);
zone; \
zone = next_zone(zone))
+#define for_each_populated_zone(zone) \
+ for (zone = (first_online_pgdat())->node_zones; \
+ zone; \
+ zone = next_zone(zone)) \
+ if (!populated_zone(zone)) \
+ ; /* do nothing */ \
+ else
+
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
return zoneref->zone;
@@ -1095,6 +1095,32 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#define pfn_valid_within(pfn) (1)
#endif
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+/*
+ * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+ * associated with it or not. In FLATMEM, it is expected that holes always
+ * have valid memmap as long as there are valid PFNs on either side of the hole.
+ * In SPARSEMEM, it is assumed that a valid section has a memmap for the
+ * entire section.
+ *
+ * However, ARM, and maybe other embedded architectures in the future,
+ * free the memmap backing holes to save memory on the assumption the memmap is
+ * never used. The page_zone linkages are then broken even though pfn_valid()
+ * returns true. A walker of the full memmap must then do this additional
+ * check to ensure the memmap they are looking at is sane by making sure
+ * the zone and PFN linkages are still valid. This is expensive, but walkers
+ * of the full memmap are extremely rare.
+ */
+int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone);
+#else
+static inline int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone)
+{
+ return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
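
The new for_each_populated_zone() iterator and the *_wmark_pages() accessors replace open-coded populated_zone() checks and direct pages_min/low/high reads. A minimal debugging sketch of how they combine (example_dump_watermarks is invented, not part of the patch):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

static void example_dump_watermarks(void)
{
	struct zone *zone;

	/* Skips zones with no present pages, unlike for_each_zone(). */
	for_each_populated_zone(zone)
		printk(KERN_DEBUG "%s: free %lu min %lu low %lu high %lu\n",
		       zone->name,
		       zone_page_state(zone, NR_FREE_PAGES),
		       min_wmark_pages(zone),
		       low_wmark_pages(zone),
		       high_wmark_pages(zone));
}
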
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 830bbcd449d..3beb2592b03 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -22,16 +22,12 @@ struct proc_mounts {
int event;
};
+struct fs_struct;
+
+extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
-extern void __put_mnt_ns(struct mnt_namespace *ns);
-
-static inline void put_mnt_ns(struct mnt_namespace *ns)
-{
- if (atomic_dec_and_lock(&ns->count, &vfsmount_lock))
- /* releases vfsmount_lock */
- __put_mnt_ns(ns);
-}
+extern void put_mnt_ns(struct mnt_namespace *ns);
static inline void exit_mnt_ns(struct task_struct *p)
{
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index fde86671f48..1bf5900ffe4 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -454,4 +454,13 @@ struct dmi_system_id {
#define DMI_MATCH(a, b) { a, b }
+#define PLATFORM_NAME_SIZE 20
+#define PLATFORM_MODULE_PREFIX "platform:"
+
+struct platform_device_id {
+ char name[PLATFORM_NAME_SIZE];
+ kernel_ulong_t driver_data
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
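
struct platform_device_id gives platform drivers a module-alias-friendly ID table, mirroring what PCI and USB drivers already have. A sketch of how a driver might declare one is below; the device names and variant values are invented, and matching on the table relies on the accompanying platform-bus changes rather than on this header alone.

#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* driver_data selects a hardware variant inside one driver. */
static const struct platform_device_id example_uart_ids[] = {
	{ "example-uart",	0 },
	{ "example-uart-dma",	1 },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(platform, example_uart_ids);
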
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc..098bdb7bfac 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -77,6 +77,7 @@ search_extable(const struct exception_table_entry *first,
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish);
void sort_main_extable(void);
+void trim_init_extable(struct module *m);
#ifdef MODULE
#define MODULE_GENERIC_TABLE(gtype,name) \
@@ -248,6 +249,10 @@ struct module
const unsigned long *crcs;
unsigned int num_syms;
+ /* Kernel parameters. */
+ struct kernel_param *kp;
+ unsigned int num_kp;
+
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
@@ -329,6 +334,19 @@ struct module
unsigned int num_tracepoints;
#endif
+#ifdef CONFIG_TRACING
+ const char **trace_bprintk_fmt_start;
+ unsigned int num_trace_bprintk_fmt;
+#endif
+#ifdef CONFIG_EVENT_TRACING
+ struct ftrace_event_call *trace_events;
+ unsigned int num_trace_events;
+#endif
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ unsigned long *ftrace_callsites;
+ unsigned int num_ftrace_callsites;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head modules_which_use_me;
@@ -345,11 +363,19 @@ struct module
local_t ref;
#endif
#endif
+
+#ifdef CONFIG_CONSTRUCTORS
+ /* Constructor functions. */
+ ctor_fn_t *ctors;
+ unsigned int num_ctors;
+#endif
};
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
+extern struct mutex module_mutex;
+
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) requires entry into the module during init.*/
@@ -358,10 +384,10 @@ static inline int module_is_live(struct module *mod)
return mod->state != MODULE_STATE_GOING;
}
-/* Is this address in a module? (second is with no locks, for oops) */
-struct module *module_text_address(unsigned long addr);
struct module *__module_text_address(unsigned long addr);
-int is_module_address(unsigned long addr);
+struct module *__module_address(unsigned long addr);
+bool is_module_address(unsigned long addr);
+bool is_module_text_address(unsigned long addr);
static inline int within_module_core(unsigned long addr, struct module *mod)
{
@@ -375,6 +401,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod)
addr < (unsigned long)mod->module_init + mod->init_size;
}
+/* Search for module by name: must hold module_mutex. */
+struct module *find_module(const char *name);
+
+struct symsearch {
+ const struct kernel_symbol *start, *stop;
+ const unsigned long *crcs;
+ enum {
+ NOT_GPL_ONLY,
+ GPL_ONLY,
+ WILL_BE_GPL_ONLY,
+ } licence;
+ bool unused;
+};
+
+/* Search for an exported symbol by name. */
+const struct kernel_symbol *find_symbol(const char *name,
+ struct module **owner,
+ const unsigned long **crc,
+ bool gplok,
+ bool warn);
+
+/* Walk the exported symbol table */
+bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
+ unsigned int symnum, void *data), void *data);
+
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -383,6 +434,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data);
+
extern void __module_put_and_exit(struct module *mod, long code)
__attribute__((noreturn));
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -444,6 +499,7 @@ static inline void __module_get(struct module *module)
#define symbol_put_addr(p) do { } while(0)
#endif /* CONFIG_MODULE_UNLOAD */
+int use_module(struct module *a, struct module *b);
/* This is a #define so the string doesn't get put in every .o file */
#define module_name(mod) \
@@ -490,21 +546,24 @@ search_module_extables(unsigned long addr)
return NULL;
}
-/* Is this address in a module? */
-static inline struct module *module_text_address(unsigned long addr)
+static inline struct module *__module_address(unsigned long addr)
{
return NULL;
}
-/* Is this address in a module? (don't take a lock, we're oopsing) */
static inline struct module *__module_text_address(unsigned long addr)
{
return NULL;
}
-static inline int is_module_address(unsigned long addr)
+static inline bool is_module_address(unsigned long addr)
{
- return 0;
+ return false;
+}
+
+static inline bool is_module_text_address(unsigned long addr)
+{
+ return false;
}
/* Get/put a kernel symbol (calls should be symmetric) */
@@ -559,6 +618,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
return 0;
}
+static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *,
+ unsigned long),
+ void *data)
+{
+ return 0;
+}
+
static inline int register_module_notifier(struct notifier_block * nb)
{
/* no events will happen anyway, so this can always succeed */
@@ -636,4 +703,21 @@ static inline void module_remove_modinfo_attrs(struct module *mod)
#define __MODULE_STRING(x) __stringify(x)
+
+#ifdef CONFIG_GENERIC_BUG
+int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+ struct module *);
+void module_bug_cleanup(struct module *);
+
+#else /* !CONFIG_GENERIC_BUG */
+
+static inline int module_bug_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+ return 0;
+}
+static inline void module_bug_cleanup(struct module *mod) {}
+#endif /* CONFIG_GENERIC_BUG */
+
#endif /* _LINUX_MODULE_H */
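
The lookup helpers now come in a bool flavour (is_module_address(), is_module_text_address()) plus a lockless __module_address() that returns the owning module. A sketch of how a caller might combine them, assuming the usual rule that the lockless variants are used with preemption disabled or module_mutex held; example_report_owner is invented:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>

static void example_report_owner(unsigned long addr)
{
	struct module *mod;

	if (!is_module_text_address(addr)) {
		printk(KERN_DEBUG "%pS is built-in\n", (void *)addr);
		return;
	}

	preempt_disable();		/* keep the module list stable */
	mod = __module_address(addr);
	if (mod)
		printk(KERN_DEBUG "%pS belongs to %s\n",
		       (void *)addr, mod->name);
	preempt_enable();
}
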
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e4af3399ef4..6547c3cdbc4 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -36,9 +36,14 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
+/* Flag bits for kernel_param.flags */
+#define KPARAM_KMALLOCED 1
+#define KPARAM_ISBOOL 2
+
struct kernel_param {
const char *name;
- unsigned int perm;
+ u16 perm;
+ u16 flags;
param_set_fn set;
param_get_fn get;
union {
@@ -79,7 +84,7 @@ struct kparam_array
parameters. perm sets the visibility in sysfs: 000 means it's
not there, read bits mean it's readable, write bits mean it's
writable. */
-#define __module_param_call(prefix, name, set, get, arg, perm) \
+#define __module_param_call(prefix, name, set, get, arg, isbool, perm) \
/* Default value instead of permissions? */ \
static int __param_perm_check_##name __attribute__((unused)) = \
BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
@@ -88,10 +93,13 @@ struct kparam_array
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
- = { __param_str_##name, perm, set, get, { arg } }
+ = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \
+ set, get, { arg } }
#define module_param_call(name, set, get, arg, perm) \
- __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm)
+ __module_param_call(MODULE_PARAM_PREFIX, \
+ name, set, get, arg, \
+ __same_type(*(arg), bool), perm)
/* Helper functions: type is byte, short, ushort, int, uint, long,
ulong, charp, bool or invbool, or XXX if you define param_get_XXX,
@@ -120,15 +128,16 @@ struct kparam_array
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
__module_param_call("", name, param_set_##type, param_get_##type, \
- &var, perm)
+ &var, __same_type(var, bool), perm)
#endif /* !MODULE */
/* Actually copy string: maxlen param is usually sizeof(string). */
#define module_param_string(name, string, len, perm) \
static const struct kparam_string __param_string_##name \
= { len, string }; \
- module_param_call(name, param_set_copystring, param_get_string, \
- .str = &__param_string_##name, perm); \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ param_set_copystring, param_get_string, \
+ .str = &__param_string_##name, 0, perm); \
__MODULE_PARM_TYPE(name, "string")
/* Called on module insert or kernel boot */
@@ -138,6 +147,16 @@ extern int parse_args(const char *name,
unsigned num,
int (*unknown)(char *param, char *val));
+/* Called by module remove. */
+#ifdef CONFIG_SYSFS
+extern void destroy_params(const struct kernel_param *params, unsigned num);
+#else
+static inline void destroy_params(const struct kernel_param *params,
+ unsigned num)
+{
+}
+#endif /* !CONFIG_SYSFS */
+
/* All the helper functions */
/* The macros to do compile-time type checking stolen from Jakub
Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
@@ -176,21 +195,30 @@ extern int param_set_charp(const char *val, struct kernel_param *kp);
extern int param_get_charp(char *buffer, struct kernel_param *kp);
#define param_check_charp(name, p) __param_check(name, p, char *)
+/* For historical reasons "bool" parameters can be (unsigned) "int". */
extern int param_set_bool(const char *val, struct kernel_param *kp);
extern int param_get_bool(char *buffer, struct kernel_param *kp);
-#define param_check_bool(name, p) __param_check(name, p, int)
+#define param_check_bool(name, p) \
+ static inline void __check_##name(void) \
+ { \
+ BUILD_BUG_ON(!__same_type(*(p), bool) && \
+ !__same_type(*(p), unsigned int) && \
+ !__same_type(*(p), int)); \
+ }
extern int param_set_invbool(const char *val, struct kernel_param *kp);
extern int param_get_invbool(char *buffer, struct kernel_param *kp);
-#define param_check_invbool(name, p) __param_check(name, p, int)
+#define param_check_invbool(name, p) __param_check(name, p, bool)
/* Comma-separated array: *nump is set to number they actually specified. */
#define module_param_array_named(name, array, type, nump, perm) \
static const struct kparam_array __param_arr_##name \
= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
sizeof(array[0]), array }; \
- module_param_call(name, param_array_set, param_array_get, \
- .arr = &__param_arr_##name, perm); \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ param_array_set, param_array_get, \
+ .arr = &__param_arr_##name, \
+ __same_type(array[0], bool), perm); \
__MODULE_PARM_TYPE(name, "array of " #type)
#define module_param_array(name, type, nump, perm) \
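
The KPARAM_ISBOOL flag and the relaxed param_check_bool() in the moduleparam.h hunk above mean a "bool" module parameter may now be backed by a real bool rather than an int. A minimal sketch (the parameter name and variable are invented):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* A genuine bool now passes param_check_bool(). */
static bool example_enabled = true;
module_param_named(enabled, example_enabled, bool, 0644);
MODULE_PARM_DESC(enabled, "Enable the example feature (default: on)");
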
diff --git a/include/linux/mount.h b/include/linux/mount.h
index cab2a85e2ee..5d527536486 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -27,9 +27,10 @@ struct mnt_namespace;
#define MNT_NODIRATIME 0x10
#define MNT_RELATIME 0x20
#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
+#define MNT_STRICTATIME 0x80
#define MNT_SHRINKABLE 0x100
-#define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */
+#define MNT_WRITE_HOLD 0x200
#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
@@ -64,13 +65,22 @@ struct vfsmount {
int mnt_expiry_mark; /* true if marked for expiry */
int mnt_pinned;
int mnt_ghosts;
- /*
- * This value is not stable unless all of the mnt_writers[] spinlocks
- * are held, and all mnt_writer[]s on this mount have 0 as their ->count
- */
- atomic_t __mnt_writers;
+#ifdef CONFIG_SMP
+ int *mnt_writers;
+#else
+ int mnt_writers;
+#endif
};
+static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+ return mnt->mnt_writers;
+#else
+ return &mnt->mnt_writers;
+#endif
+}
+
static inline struct vfsmount *mntget(struct vfsmount *mnt)
{
if (mnt)
@@ -78,7 +88,11 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt)
return mnt;
}
+struct file; /* forward dec */
+
extern int mnt_want_write(struct vfsmount *mnt);
+extern int mnt_want_write_file(struct file *file);
+extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mntput_no_expire(struct vfsmount *mnt);
extern void mnt_pin(struct vfsmount *mnt);
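
mnt_want_write_file() and mnt_clone_write() extend the existing mnt_want_write()/mnt_drop_write() pairing; the file-based variant is meant to piggy-back on the write reference an already-open file holds. A sketch of the usual bracket around a modification (example_touch and the elided inode update are placeholders):

#include <linux/fs.h>
#include <linux/mount.h>

static int example_touch(struct file *file)
{
	int err;

	err = mnt_want_write_file(file);	/* pairs with mnt_drop_write() */
	if (err)
		return err;

	/* ... modify file->f_path.dentry->d_inode here ... */

	mnt_drop_write(file->f_path.mnt);
	return 0;
}
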
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index 5c42821da2d..068a0c9946a 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -11,21 +11,11 @@
*/
#ifdef CONFIG_BLOCK
-struct mpage_data {
- struct bio *bio;
- sector_t last_block_in_bio;
- get_block_t *get_block;
- unsigned use_writepage;
-};
-
struct writeback_control;
-struct bio *mpage_bio_submit(int rw, struct bio *bio);
int mpage_readpages(struct address_space *mapping, struct list_head *pages,
unsigned nr_pages, get_block_t get_block);
int mpage_readpage(struct page *page, get_block_t get_block);
-int __mpage_writepage(struct page *page, struct writeback_control *wbc,
- void *data);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
int mpage_writepage(struct page *page, get_block_t *get_block,
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 8a455694d68..0d45b4e8d36 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -193,6 +193,9 @@ struct vif_device
struct mfc_cache
{
struct mfc_cache *next; /* Next entry on cache line */
+#ifdef CONFIG_NET_NS
+ struct net *mfc_net;
+#endif
__be32 mfc_mcastgrp; /* Group the entry belongs to */
__be32 mfc_origin; /* Source of packet */
vifi_t mfc_parent; /* Source interface */
@@ -215,6 +218,18 @@ struct mfc_cache
} mfc_un;
};
+static inline
+struct net *mfc_net(const struct mfc_cache *mfc)
+{
+ return read_pnet(&mfc->mfc_net);
+}
+
+static inline
+void mfc_net_set(struct mfc_cache *mfc, struct net *net)
+{
+ write_pnet(&mfc->mfc_net, hold_net(net));
+}
+
#define MFC_STATIC 1
#define MFC_NOTIFY 2
@@ -241,7 +256,8 @@ struct mfc_cache
#ifdef __KERNEL__
struct rtmsg;
-extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
+extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ struct rtmsg *rtm, int nowait);
#endif
#endif
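
mfc_net()/mfc_net_set() pair a multicast cache entry with its network namespace. A sketch of the expected allocate/free pairing; the real code uses a kmem_cache, so the example_ helpers and kzalloc() here are simplifications:

#include <linux/mroute.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

static struct mfc_cache *example_alloc_mfc(struct net *net, gfp_t gfp)
{
	struct mfc_cache *c = kzalloc(sizeof(*c), gfp);

	if (c)
		mfc_net_set(c, net);	/* takes a reference via hold_net() */
	return c;
}

static void example_free_mfc(struct mfc_cache *c)
{
	release_net(mfc_net(c));	/* drop the reference taken above */
	kfree(c);
}
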
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 5375faca1f7..43dc97e3218 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -65,7 +65,7 @@ struct mif6ctl {
mifi_t mif6c_mifi; /* Index of MIF */
unsigned char mif6c_flags; /* MIFF_ flags */
unsigned char vifc_threshold; /* ttl limit */
- u_short mif6c_pifi; /* the index of the physical IF */
+ __u16 mif6c_pifi; /* the index of the physical IF */
unsigned int vifc_rate_limit; /* Rate limiter values (NI) */
};
diff --git a/include/linux/msi.h b/include/linux/msi.h
index d2b8a1e8ca1..6991ab5b24d 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -20,20 +20,23 @@ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
struct msi_desc {
struct {
- __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
+ __u8 is_msix : 1;
+ __u8 multiple: 3; /* log2 number of messages */
__u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 masked : 1;
__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
__u8 pos; /* Location of the msi capability */
- __u32 maskbits_mask; /* mask bits mask */
__u16 entry_nr; /* specific enabled entry */
unsigned default_irq; /* default pre-assigned irq */
- }msi_attrib;
+ } msi_attrib;
+ u32 masked; /* mask bits */
unsigned int irq;
struct list_head list;
- void __iomem *mask_base;
+ union {
+ void __iomem *mask_base;
+ u8 mask_pos;
+ };
struct pci_dev *dev;
/* Last set MSI message */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 3aa5d77c2cd..5675b63a063 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/notifier.h>
+#include <linux/device.h>
#include <linux/mtd/compatmac.h>
#include <mtd/mtd-abi.h>
@@ -162,6 +163,20 @@ struct mtd_info {
/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
+ /* Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+ unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags);
+
+ /* Backing device capabilities for this device
+ * - provides mmap capabilities
+ */
+ struct backing_dev_info *backing_dev_info;
+
int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
@@ -223,6 +238,7 @@ struct mtd_info {
void *priv;
struct module *owner;
+ struct device dev;
int usecount;
/* If the driver is something smart, like UBI, it may need to maintain
@@ -233,6 +249,11 @@ struct mtd_info {
void (*put_device) (struct mtd_info *mtd);
};
+static inline struct mtd_info *dev_to_mtd(struct device *dev)
+{
+ return dev ? container_of(dev, struct mtd_info, dev) : NULL;
+}
+
static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->erasesize_shift)
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index db5b63da2a7..4030ebada49 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -43,8 +43,8 @@ extern void nand_wait_ready(struct mtd_info *mtd);
* is supported now. If you add a chip with bigger oobsize/page
* adjust this accordingly.
*/
-#define NAND_MAX_OOBSIZE 64
-#define NAND_MAX_PAGESIZE 2048
+#define NAND_MAX_OOBSIZE 128
+#define NAND_MAX_PAGESIZE 4096
/*
* Constants for hardware specific CLE/ALE/NCE function
@@ -563,6 +563,7 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
* @options: Option flags, e.g. 16bit buswidth
* @ecclayout: ecc layout info structure
* @part_probe_types: NULL-terminated array of probe types
+ * @set_parts: platform specific function to set partitions
* @priv: hardware controller specific settings
*/
struct platform_nand_chip {
@@ -574,26 +575,41 @@ struct platform_nand_chip {
int chip_delay;
unsigned int options;
const char **part_probe_types;
+ void (*set_parts)(uint64_t size,
+ struct platform_nand_chip *chip);
void *priv;
};
+/* Keep gcc happy */
+struct platform_device;
+
/**
* struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
* @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
*/
struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat,
unsigned int ctrl);
+ void (*write_buf)(struct mtd_info *mtd,
+ const uint8_t *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd,
+ uint8_t *buf, int len);
void *priv;
};
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9aa2a9149b5..8ed87337438 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -17,6 +17,7 @@
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
+#define MAX_DIES 2
#define MAX_BUFFERRAM 2
/* Scan and identify a OneNAND device */
@@ -51,7 +52,12 @@ struct onenand_bufferram {
/**
* struct onenand_chip - OneNAND Private Flash Chip Data
* @base: [BOARDSPECIFIC] address to access OneNAND
+ * @dies: [INTERN][FLEX-ONENAND] number of dies on chip
+ * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies
+ * @diesize: [INTERN][FLEX-ONENAND] Size of the dies
* @chipsize: [INTERN] the size of one chip for multichip arrays
+ * FIXME For Flex-OneNAND, chipsize holds maximum possible
+ * device size, i.e. when all blocks are considered MLC
* @device_id: [INTERN] device ID
* @density_mask: chip density, used for DDP devices
* @version_id: [INTERN] version ID
@@ -68,6 +74,8 @@ struct onenand_bufferram {
* @command: [REPLACEABLE] hardware specific function for writing
* commands to the chip
* @wait: [REPLACEABLE] hardware specific function for wait on ready
+ * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready
+ * @unlock_all: [REPLACEABLE] hardware specific function for unlock all
* @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @read_word: [REPLACEABLE] hardware specific function for read
@@ -92,9 +100,13 @@ struct onenand_bufferram {
*/
struct onenand_chip {
void __iomem *base;
+ unsigned dies;
+ unsigned boundary[MAX_DIES];
+ loff_t diesize[MAX_DIES];
unsigned int chipsize;
unsigned int device_id;
unsigned int version_id;
+ unsigned int technology;
unsigned int density_mask;
unsigned int options;
@@ -108,6 +120,8 @@ struct onenand_chip {
int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
int (*wait)(struct mtd_info *mtd, int state);
+ int (*bbt_wait)(struct mtd_info *mtd, int state);
+ void (*unlock_all)(struct mtd_info *mtd);
int (*read_bufferram)(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count);
int (*write_bufferram)(struct mtd_info *mtd, int area,
@@ -145,6 +159,8 @@ struct onenand_chip {
#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0)
#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1)
+#define FLEXONENAND(this) \
+ (this->device_id & DEVICE_IS_FLEXONENAND)
#define ONENAND_GET_SYS_CFG1(this) \
(this->read_word(this->base + ONENAND_REG_SYS_CFG1))
#define ONENAND_SET_SYS_CFG1(v, this) \
@@ -153,6 +169,9 @@ struct onenand_chip {
#define ONENAND_IS_DDP(this) \
(this->device_id & ONENAND_DEVICE_IS_DDP)
+#define ONENAND_IS_MLC(this) \
+ (this->technology & ONENAND_TECHNOLOGY_IS_MLC)
+
#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM
#define ONENAND_IS_2PLANE(this) \
(this->options & ONENAND_HAS_2PLANE)
@@ -169,6 +188,7 @@ struct onenand_chip {
#define ONENAND_HAS_CONT_LOCK (0x0001)
#define ONENAND_HAS_UNLOCK_ALL (0x0002)
#define ONENAND_HAS_2PLANE (0x0004)
+#define ONENAND_SKIP_UNLOCK_CHECK (0x0100)
#define ONENAND_PAGEBUF_ALLOC (0x1000)
#define ONENAND_OOBBUF_ALLOC (0x2000)
@@ -176,6 +196,7 @@ struct onenand_chip {
* OneNAND Flash Manufacturer ID Codes
*/
#define ONENAND_MFR_SAMSUNG 0xec
+#define ONENAND_MFR_NUMONYX 0x20
/**
* struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
@@ -189,5 +210,8 @@ struct onenand_manufacturers {
int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
+unsigned onenand_block(struct onenand_chip *this, loff_t addr);
+loff_t onenand_addr(struct onenand_chip *this, int block);
+int flexonenand_region(struct mtd_info *mtd, loff_t addr);
#endif /* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 0c6bbe28f38..86a6bbef646 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -67,6 +67,9 @@
/*
* Device ID Register F001h (R)
*/
+#define DEVICE_IS_FLEXONENAND (1 << 9)
+#define FLEXONENAND_PI_MASK (0x3ff)
+#define FLEXONENAND_PI_UNLOCK_SHIFT (14)
#define ONENAND_DEVICE_DENSITY_MASK (0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT (4)
#define ONENAND_DEVICE_IS_DDP (1 << 3)
@@ -84,6 +87,11 @@
#define ONENAND_VERSION_PROCESS_SHIFT (8)
/*
+ * Technology Register F006h (R)
+ */
+#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0)
+
+/*
* Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W)
*/
#define ONENAND_DDP_SHIFT (15)
@@ -93,7 +101,8 @@
/*
* Start Address 8 F107h (R/W)
*/
-#define ONENAND_FPA_MASK (0x3f)
+/* Note: It's actually 0x3f in case of SLC */
+#define ONENAND_FPA_MASK (0x7f)
#define ONENAND_FPA_SHIFT (2)
#define ONENAND_FSA_MASK (0x03)
@@ -105,7 +114,8 @@
#define ONENAND_BSA_BOOTRAM (0 << 2)
#define ONENAND_BSA_DATARAM0 (2 << 2)
#define ONENAND_BSA_DATARAM1 (3 << 2)
-#define ONENAND_BSC_MASK (0x03)
+/* Note: It's actually 0x03 in case of SLC */
+#define ONENAND_BSC_MASK (0x07)
/*
* Command Register F220h (R/W)
@@ -124,9 +134,13 @@
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
+#define FLEXONENAND_CMD_PI_UPDATE (0x05)
+#define FLEXONENAND_CMD_PI_ACCESS (0x66)
+#define FLEXONENAND_CMD_RECOVER_LSB (0x05)
/* NOTE: Those are not *REAL* commands */
#define ONENAND_CMD_BUFFERRAM (0x1978)
+#define FLEXONENAND_CMD_READ_PI (0x1985)
/*
* System Configuration 1 Register F221h (R, R/W)
@@ -192,10 +206,12 @@
#define ONENAND_ECC_1BIT_ALL (0x5555)
#define ONENAND_ECC_2BIT (1 << 1)
#define ONENAND_ECC_2BIT_ALL (0xAAAA)
+#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010)
/*
* One-Time Programmable (OTP)
*/
+#define FLEXONENAND_OTP_LOCK_OFFSET (2048)
#define ONENAND_OTP_LOCK_OFFSET (14)
#endif /* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index a45dd831b3f..af6dcb992bc 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -40,7 +40,6 @@ struct mtd_partition {
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
- struct mtd_info **mtdp; /* pointer to store the MTD object */
};
#define MTDPART_OFS_NXTBLK (-2)
@@ -76,4 +75,16 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
struct device_node *node,
struct mtd_partition **pparts);
+#ifdef CONFIG_MTD_PARTITIONS
+static inline int mtd_has_partitions(void) { return 1; }
+#else
+static inline int mtd_has_partitions(void) { return 0; }
+#endif
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+static inline int mtd_has_cmdlinepart(void) { return 1; }
+#else
+static inline int mtd_has_cmdlinepart(void) { return 0; }
+#endif
+
#endif
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 6316fafe5c2..6913b71d9ab 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -132,6 +132,39 @@ struct ubi_device_info {
dev_t cdev;
};
+/*
+ * enum - volume notification types.
+ * @UBI_VOLUME_ADDED: volume has been added
+ * @UBI_VOLUME_REMOVED: volume has been removed
+ * @UBI_VOLUME_RESIZED: volume size has been re-sized
+ * @UBI_VOLUME_RENAMED: volume name has been re-named
+ * @UBI_VOLUME_UPDATED: volume has been updated
+ *
+ * These constants define which type of event has happened when a volume
+ * notification function is invoked.
+ */
+enum {
+ UBI_VOLUME_ADDED,
+ UBI_VOLUME_REMOVED,
+ UBI_VOLUME_RESIZED,
+ UBI_VOLUME_RENAMED,
+ UBI_VOLUME_UPDATED,
+};
+
+/*
+ * struct ubi_notification - UBI notification description structure.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
+ *
+ * UBI notifiers are called with a pointer to an object of this type. The
+ * object describes the notification. Namely, it provides a description of the
+ * UBI device and UBI volume the notification informs about.
+ */
+struct ubi_notification {
+ struct ubi_device_info di;
+ struct ubi_volume_info vi;
+};
+
/* UBI descriptor given to users when they open UBI volumes */
struct ubi_volume_desc;
@@ -141,6 +174,10 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode);
struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
int mode);
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing);
+int ubi_unregister_volume_notifier(struct notifier_block *nb);
+
void ubi_close_volume(struct ubi_volume_desc *desc);
int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
int len, int check);
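
ubi_register_volume_notifier() hooks a standard notifier_block into UBI volume events; as described above, the callback's data pointer is a struct ubi_notification. A sketch of a client (the example_ names are invented):

#include <linux/kernel.h>
#include <linux/mtd/ubi.h>
#include <linux/notifier.h>

static int example_ubi_notify(struct notifier_block *nb,
			      unsigned long notification_type, void *data)
{
	struct ubi_notification *nt = data;

	if (notification_type == UBI_VOLUME_ADDED)
		printk(KERN_INFO "ubi%d: volume %d appeared\n",
		       nt->di.ubi_num, nt->vi.vol_id);
	return NOTIFY_OK;
}

static struct notifier_block example_ubi_nb = {
	.notifier_call = example_ubi_notify,
};

/* Register with ubi_register_volume_notifier(&example_ubi_nb, 0) on init,
 * and drop it with ubi_unregister_volume_notifier(&example_ubi_nb) on exit. */
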
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4f807..878cab4f5fc 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
atomic_t count;
spinlock_t wait_lock;
struct list_head wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
struct thread_info *owner;
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
const char *name;
void *magic;
#endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
struct list_head list;
struct task_struct *task;
#ifdef CONFIG_DEBUG_MUTEXES
- struct mutex *lock;
void *magic;
#endif
};
@@ -149,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif
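
atomic_dec_and_mutex_lock() is the mutex counterpart of atomic_dec_and_lock(): it returns holding the mutex only when the counter hits zero. A sketch of the classic last-reference teardown it enables (struct example_obj is invented):

#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct example_obj {
	atomic_t refcount;
	struct mutex lock;
	/* ... payload ... */
};

static void example_put(struct example_obj *obj)
{
	/* Non-zero return means the count reached zero and obj->lock is held. */
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock))
		return;

	/* ... release resources owned by obj while still serialized ... */
	mutex_unlock(&obj->lock);
	kfree(obj);
}
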
diff --git a/include/linux/namei.h b/include/linux/namei.h
index fc2e0357987..d870ae2faed 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -18,6 +18,7 @@ enum { MAX_NESTED_LINKS = 8 };
struct nameidata {
struct path path;
struct qstr last;
+ struct path root;
unsigned int flags;
int last_type;
unsigned depth;
@@ -69,7 +70,6 @@ extern int path_lookup(const char *, unsigned, struct nameidata *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct nameidata *);
-extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags);
extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
@@ -78,8 +78,8 @@ extern void release_open_intent(struct nameidata *);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_noperm(const char *, struct dentry *);
-extern int follow_down(struct vfsmount **, struct dentry **);
-extern int follow_up(struct vfsmount **, struct dentry **);
+extern int follow_down(struct path *);
+extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index f69e66d151c..30b06c89394 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -204,7 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
/* linux/fs/ncpfs/dir.c */
extern const struct inode_operations ncp_dir_inode_operations;
extern const struct file_operations ncp_dir_operations;
-extern struct dentry_operations ncp_root_dentry_operations;
+extern const struct dentry_operations ncp_root_dentry_operations;
int ncp_conn_logged_in(struct super_block *);
int ncp_date_dos2unix(__le16 time, __le16 date);
void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
diff --git a/include/linux/ncp_no.h b/include/linux/ncp_no.h
index f56a696a7cc..cddaa48fb18 100644
--- a/include/linux/ncp_no.h
+++ b/include/linux/ncp_no.h
@@ -2,18 +2,18 @@
#define _NCP_NO
/* these define the attribute byte as seen by NCP */
-#define aRONLY (__constant_cpu_to_le32(1))
-#define aHIDDEN (__constant_cpu_to_le32(2))
-#define aSYSTEM (__constant_cpu_to_le32(4))
-#define aEXECUTE (__constant_cpu_to_le32(8))
-#define aDIR (__constant_cpu_to_le32(0x10))
-#define aARCH (__constant_cpu_to_le32(0x20))
-#define aSHARED (__constant_cpu_to_le32(0x80))
-#define aDONTSUBALLOCATE (__constant_cpu_to_le32(1L<<11))
-#define aTRANSACTIONAL (__constant_cpu_to_le32(1L<<12))
-#define aPURGE (__constant_cpu_to_le32(1L<<16))
-#define aRENAMEINHIBIT (__constant_cpu_to_le32(1L<<17))
-#define aDELETEINHIBIT (__constant_cpu_to_le32(1L<<18))
-#define aDONTCOMPRESS (__constant_cpu_to_le32(1L<<27))
+#define aRONLY (__cpu_to_le32(1))
+#define aHIDDEN (__cpu_to_le32(2))
+#define aSYSTEM (__cpu_to_le32(4))
+#define aEXECUTE (__cpu_to_le32(8))
+#define aDIR (__cpu_to_le32(0x10))
+#define aARCH (__cpu_to_le32(0x20))
+#define aSHARED (__cpu_to_le32(0x80))
+#define aDONTSUBALLOCATE (__cpu_to_le32(1L<<11))
+#define aTRANSACTIONAL (__cpu_to_le32(1L<<12))
+#define aPURGE (__cpu_to_le32(1L<<16))
+#define aRENAMEINHIBIT (__cpu_to_le32(1L<<17))
+#define aDELETEINHIBIT (__cpu_to_le32(1L<<18))
+#define aDONTCOMPRESS (__cpu_to_le32(1L<<27))
#endif /* _NCP_NO */
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index 8730d5dae1b..12c9de13845 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -31,6 +31,7 @@ enum
* Neighbor Cache Entry Flags
*/
+#define NTF_USE 0x01
#define NTF_PROXY 0x08 /* == ATF_PUBL */
#define NTF_ROUTER 0x80
diff --git a/include/linux/net.h b/include/linux/net.h
index 4515efae4c3..4fc2ffd527f 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -129,11 +129,15 @@ struct socket {
socket_state state;
short type;
unsigned long flags;
- const struct proto_ops *ops;
+ /*
+ * Please keep fasync_list & wait fields in the same cache line
+ */
struct fasync_struct *fasync_list;
+ wait_queue_head_t wait;
+
struct file *file;
struct sock *sk;
- wait_queue_head_t wait;
+ const struct proto_ops *ops;
};
struct vm_area_struct;
diff --git a/include/linux/net_dropmon.h b/include/linux/net_dropmon.h
new file mode 100644
index 00000000000..3ceb0cc1bc7
--- /dev/null
+++ b/include/linux/net_dropmon.h
@@ -0,0 +1,65 @@
+#ifndef __NET_DROPMON_H
+#define __NET_DROPMON_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+struct net_dm_drop_point {
+ __u8 pc[8];
+ __u32 count;
+};
+
+#define is_drop_point_hw(x) do {\
+ int ____i, ____j = 0;\
+ for (____i = 0; ____i < 8; ____i++)\
+ ____j |= x[____i];\
+ ____j;\
+} while (0)
+
+#define NET_DM_CFG_VERSION 0
+#define NET_DM_CFG_ALERT_COUNT 1
+#define NET_DM_CFG_ALERT_DELAY 2
+#define NET_DM_CFG_MAX 3
+
+struct net_dm_config_entry {
+ __u32 type;
+ __u64 data __attribute__((aligned(8)));
+};
+
+struct net_dm_config_msg {
+ __u32 entries;
+ struct net_dm_config_entry options[0];
+};
+
+struct net_dm_alert_msg {
+ __u32 entries;
+ struct net_dm_drop_point points[0];
+};
+
+struct net_dm_user_msg {
+ union {
+ struct net_dm_config_msg user;
+ struct net_dm_alert_msg alert;
+ } u;
+};
+
+
+/* These are the netlink message types for this protocol */
+
+enum {
+ NET_DM_CMD_UNSPEC = 0,
+ NET_DM_CMD_ALERT,
+ NET_DM_CMD_CONFIG,
+ NET_DM_CMD_START,
+ NET_DM_CMD_STOP,
+ _NET_DM_CMD_MAX,
+};
+
+#define NET_DM_CMD_MAX (_NET_DM_CMD_MAX - 1)
+
+/*
+ * Our group identifiers
+ */
+#define NET_DM_GRP_ALERT 1
+#endif
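
The dropped-packet alerts carry an array of struct net_dm_drop_point inside a struct net_dm_alert_msg. A userspace sketch of walking one alert payload; the generic-netlink plumbing that binds to the NET_DM family and the NET_DM_GRP_ALERT group is omitted, and example_print_alert is invented:

#include <linux/net_dropmon.h>
#include <stdio.h>
#include <string.h>

static void example_print_alert(const struct net_dm_alert_msg *msg)
{
	__u32 i;
	__u64 pc;

	for (i = 0; i < msg->entries; i++) {
		/* pc[] holds the instruction pointer of the drop site. */
		memcpy(&pc, msg->points[i].pc, sizeof(pc));
		printf("drop point 0x%llx hit %u times\n",
		       (unsigned long long)pc, msg->points[i].count);
	}
}
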
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
new file mode 100644
index 00000000000..a3b8546354a
--- /dev/null
+++ b/include/linux/net_tstamp.h
@@ -0,0 +1,104 @@
+/*
+ * Userspace API for hardware time stamping of network packets
+ *
+ * Copyright (C) 2008,2009 Intel Corporation
+ * Author: Patrick Ohly <patrick.ohly@intel.com>
+ *
+ */
+
+#ifndef _NET_TIMESTAMPING_H
+#define _NET_TIMESTAMPING_H
+
+#include <linux/socket.h> /* for SO_TIMESTAMPING */
+
+/* SO_TIMESTAMPING gets an integer bit field comprised of these values */
+enum {
+ SOF_TIMESTAMPING_TX_HARDWARE = (1<<0),
+ SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1),
+ SOF_TIMESTAMPING_RX_HARDWARE = (1<<2),
+ SOF_TIMESTAMPING_RX_SOFTWARE = (1<<3),
+ SOF_TIMESTAMPING_SOFTWARE = (1<<4),
+ SOF_TIMESTAMPING_SYS_HARDWARE = (1<<5),
+ SOF_TIMESTAMPING_RAW_HARDWARE = (1<<6),
+ SOF_TIMESTAMPING_MASK =
+ (SOF_TIMESTAMPING_RAW_HARDWARE - 1) |
+ SOF_TIMESTAMPING_RAW_HARDWARE
+};
+
+/**
+ * struct hwtstamp_config - %SIOCSHWTSTAMP parameter
+ *
+ * @flags: no flags defined right now, must be zero
+ * @tx_type: one of HWTSTAMP_TX_*
+ * @rx_filter: one of HWTSTAMP_FILTER_*
+ *
+ * %SIOCSHWTSTAMP expects a &struct ifreq with a ifr_data pointer to
+ * this structure. dev_ifsioc() in the kernel takes care of the
+ * translation between 32 bit userspace and 64 bit kernel. The
+ * structure is intentionally chosen so that it has the same layout on
+ * 32 and 64 bit systems, don't break this!
+ */
+struct hwtstamp_config {
+ int flags;
+ int tx_type;
+ int rx_filter;
+};
+
+/* possible values for hwtstamp_config->tx_type */
+enum {
+ /*
+ * No outgoing packet will need hardware time stamping;
+ * should a packet arrive which asks for it, no hardware
+ * time stamping will be done.
+ */
+ HWTSTAMP_TX_OFF,
+
+ /*
+ * Enables hardware time stamping for outgoing packets;
+ * the sender of the packet decides which are to be
+ * time stamped by setting %SOF_TIMESTAMPING_TX_HARDWARE
+ * before sending the packet.
+ */
+ HWTSTAMP_TX_ON,
+};
+
+/* possible values for hwtstamp_config->rx_filter */
+enum {
+ /* time stamp no incoming packet at all */
+ HWTSTAMP_FILTER_NONE,
+
+ /* time stamp any incoming packet */
+ HWTSTAMP_FILTER_ALL,
+
+ /* return value: time stamp all packets requested plus some others */
+ HWTSTAMP_FILTER_SOME,
+
+ /* PTP v1, UDP, any kind of event packet */
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
+ /* PTP v1, UDP, Sync packet */
+ HWTSTAMP_FILTER_PTP_V1_L4_SYNC,
+ /* PTP v1, UDP, Delay_req packet */
+ HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ,
+ /* PTP v2, UDP, any kind of event packet */
+ HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
+ /* PTP v2, UDP, Sync packet */
+ HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
+ /* PTP v2, UDP, Delay_req packet */
+ HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ,
+
+ /* 802.AS1, Ethernet, any kind of event packet */
+ HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
+ /* 802.AS1, Ethernet, Sync packet */
+ HWTSTAMP_FILTER_PTP_V2_L2_SYNC,
+ /* 802.AS1, Ethernet, Delay_req packet */
+ HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ,
+
+ /* PTP v2/802.AS1, any layer, any kind of event packet */
+ HWTSTAMP_FILTER_PTP_V2_EVENT,
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ HWTSTAMP_FILTER_PTP_V2_SYNC,
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ,
+};
+
+#endif /* _NET_TIMESTAMPING_H */
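
Putting the two halves together: SIOCSHWTSTAMP configures the device, SO_TIMESTAMPING selects what gets reported on the socket. A userspace sketch under the assumption that the matching sockios/socket definitions from this series are installed; the error handling is minimal and the example_ function name is invented:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static int example_enable_rx_hw_stamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;		/* as documented above */

	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)	/* needs driver support */
		return -1;

	/* Ask the stack to report raw hardware stamps on received packets. */
	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
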
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 659366734f3..d4a4d986779 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -32,15 +32,18 @@
#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
+#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <linux/device.h>
#include <linux/percpu.h>
+#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
+#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
@@ -48,7 +51,6 @@
#endif
struct vlan_group;
-struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
@@ -96,14 +98,14 @@ struct wireless_dev;
* Compute the worst case header length according to the protocols
* used.
*/
-
+
#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
-#elif defined(CONFIG_TR)
+#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
@@ -124,7 +126,7 @@ struct wireless_dev;
* Network device statistics. Akin to the 2.0 ether stats but
* with byte counters.
*/
-
+
struct net_device_stats
{
unsigned long rx_packets; /* total packets received */
@@ -209,6 +211,24 @@ struct dev_addr_list
#define dmi_users da_users
#define dmi_gusers da_gusers
+struct netdev_hw_addr {
+ struct list_head list;
+ unsigned char addr[MAX_ADDR_LEN];
+ unsigned char type;
+#define NETDEV_HW_ADDR_T_LAN 1
+#define NETDEV_HW_ADDR_T_SAN 2
+#define NETDEV_HW_ADDR_T_SLAVE 3
+#define NETDEV_HW_ADDR_T_UNICAST 4
+ int refcount;
+ bool synced;
+ struct rcu_head rcu_head;
+};
+
+struct netdev_hw_addr_list {
+ struct list_head list;
+ int count;
+};
+
struct hh_cache
{
struct hh_cache *hh_next; /* Next entry */
@@ -285,7 +305,7 @@ enum netdev_state_t
/*
* This structure holds at boot time configured netdevice settings. They
- * are then used in the device probing.
+ * are then used in the device probing.
*/
struct netdev_boot_setup {
char name[IFNAMSIZ];
@@ -314,6 +334,9 @@ struct napi_struct {
spinlock_t poll_lock;
int poll_owner;
#endif
+
+ unsigned int gro_count;
+
struct net_device *dev;
struct list_head dev_list;
struct sk_buff *gro_list;
@@ -327,6 +350,14 @@ enum
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
};
+enum {
+ GRO_MERGED,
+ GRO_MERGED_FREE,
+ GRO_HELD,
+ GRO_NORMAL,
+ GRO_DROP,
+};
+
extern void __napi_schedule(struct napi_struct *n);
static inline int napi_disable_pending(struct napi_struct *n)
@@ -435,12 +466,25 @@ enum netdev_queue_state_t
};
struct netdev_queue {
+/*
+ * read mostly part
+ */
struct net_device *dev;
struct Qdisc *qdisc;
unsigned long state;
- spinlock_t _xmit_lock;
- int xmit_lock_owner;
struct Qdisc *qdisc_sleeping;
+/*
+ * write mostly part
+ */
+ spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+ int xmit_lock_owner;
+ /*
+ * please use this field instead of dev->trans_start
+ */
+ unsigned long trans_start;
+ unsigned long tx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_dropped;
} ____cacheline_aligned_in_smp;
@@ -488,7 +532,7 @@ struct netdev_queue {
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
- * needs to be changed. If not this interface is not defined, the
+ * needs to be changed. If this interface is not defined, the
* mac address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
@@ -582,6 +626,14 @@ struct net_device_ops {
#define HAVE_NETDEV_POLL
void (*ndo_poll_controller)(struct net_device *dev);
#endif
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
+ u16 xid,
+ struct scatterlist *sgl,
+ unsigned int sgc);
+ int (*ndo_fcoe_ddp_done)(struct net_device *dev,
+ u16 xid);
+#endif
};
/*
@@ -650,14 +702,19 @@ struct net_device
#define NETIF_F_GRO 16384 /* Generic receive offload */
#define NETIF_F_LRO 32768 /* large receive offload */
+/* the GSO_MASK reserves bits 16 through 23 */
+#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
+#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
+
/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT 16
-#define NETIF_F_GSO_MASK 0xffff0000
+#define NETIF_F_GSO_MASK 0x00ff0000
#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
@@ -724,10 +781,10 @@ struct net_device
unsigned char addr_len; /* hardware address length */
unsigned short dev_id; /* for shared network cards */
- spinlock_t addr_list_lock;
- struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
- int uc_count; /* Number of installed ucasts */
+ struct netdev_hw_addr_list uc; /* Secondary unicast
+ mac addresses */
int uc_promisc;
+ spinlock_t addr_list_lock;
struct dev_addr_list *mc_list; /* Multicast mac addresses */
int mc_count; /* Number of installed mcasts */
unsigned int promiscuity;
@@ -740,7 +797,7 @@ struct net_device
void *dsa_ptr; /* dsa specific data */
#endif
void *atalk_ptr; /* AppleTalk link */
- void *ip_ptr; /* IPv4 specific data */
+ void *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
void *ip6_ptr; /* IPv6 specific data */
void *ec_ptr; /* Econet specific data */
@@ -753,8 +810,12 @@ struct net_device
*/
unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
- unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
- because most packets are unicast) */
+ unsigned char *dev_addr; /* hw address, (before bcast
+ because most packets are
+ unicast) */
+
+ struct netdev_hw_addr_list dev_addrs; /* list of device
+ hw addresses */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
@@ -774,6 +835,11 @@ struct net_device
* One part is mostly used on xmit path (device)
*/
/* These may be needed for future network-power-down code. */
+
+ /*
+ * trans_start here is expensive for high speed devices on SMP,
+ * please use netdev_queue->trans_start instead.
+ */
unsigned long trans_start; /* Time (in jiffies) of last Tx */
int watchdog_timeo; /* used by dev_watchdog() */
@@ -840,48 +906,14 @@ struct net_device
struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
-#ifdef CONFIG_COMPAT_NET_DEV_OPS
- struct {
- int (*init)(struct net_device *dev);
- void (*uninit)(struct net_device *dev);
- int (*open)(struct net_device *dev);
- int (*stop)(struct net_device *dev);
- int (*hard_start_xmit) (struct sk_buff *skb,
- struct net_device *dev);
- u16 (*select_queue)(struct net_device *dev,
- struct sk_buff *skb);
- void (*change_rx_flags)(struct net_device *dev,
- int flags);
- void (*set_rx_mode)(struct net_device *dev);
- void (*set_multicast_list)(struct net_device *dev);
- int (*set_mac_address)(struct net_device *dev,
- void *addr);
- int (*validate_addr)(struct net_device *dev);
- int (*do_ioctl)(struct net_device *dev,
- struct ifreq *ifr, int cmd);
- int (*set_config)(struct net_device *dev,
- struct ifmap *map);
- int (*change_mtu)(struct net_device *dev, int new_mtu);
- int (*neigh_setup)(struct net_device *dev,
- struct neigh_parms *);
- void (*tx_timeout) (struct net_device *dev);
- struct net_device_stats* (*get_stats)(struct net_device *dev);
- void (*vlan_rx_register)(struct net_device *dev,
- struct vlan_group *grp);
- void (*vlan_rx_add_vid)(struct net_device *dev,
- unsigned short vid);
- void (*vlan_rx_kill_vid)(struct net_device *dev,
- unsigned short vid);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- void (*poll_controller)(struct net_device *dev);
-#endif
- };
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ /* max exchange id for FCoE LRO by ddp */
+ unsigned int fcoe_ddp_xid;
#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
#define NETDEV_ALIGN 32
-#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
@@ -952,9 +984,7 @@ static inline bool netdev_uses_trailer_tags(struct net_device *dev)
*/
static inline void *netdev_priv(const struct net_device *dev)
{
- return (char *)dev + ((sizeof(struct net_device)
- + NETDEV_ALIGN_CONST)
- & ~NETDEV_ALIGN_CONST);
+ return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
/* Set the sysfs physical device reference for the network logical device
@@ -984,6 +1014,15 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
+
+ /* Length of frag0. */
+ unsigned int frag0_len;
+
+ /* This indicates where we are processing relative to skb->data. */
+ int data_offset;
+
/* This is non-zero if the packet may be of the same flow. */
int same_flow;
@@ -1016,14 +1055,6 @@ struct packet_type {
struct list_head list;
};
-struct napi_gro_fraginfo {
- skb_frag_t frags[MAX_SKB_FRAGS];
- unsigned int nr_frags;
- unsigned int ip_summed;
- unsigned int len;
- __wsum csum;
-};
-
#include <linux/interrupt.h>
#include <linux/notifier.h>
@@ -1088,6 +1119,54 @@ extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
+extern int skb_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
+extern void skb_gro_reset_offset(struct sk_buff *skb);
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+ return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+ NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void *skb_gro_header_fast(struct sk_buff *skb,
+ unsigned int offset)
+{
+ return NAPI_GRO_CB(skb)->frag0 + offset;
+}
+
+static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+{
+ return NAPI_GRO_CB(skb)->frag0_len < hlen;
+}
+
+static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+ return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
+}
+
+static inline void *skb_gro_mac_header(struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
+}
+
+static inline void *skb_gro_network_header(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
+ skb_network_offset(skb);
+}
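
The frag0 fast path above is easiest to see in use. The following is an editor's illustrative sketch (not part of this patch) of how a protocol's gro_receive callback would read its header through these helpers; struct example_hdr and example_gro_receive() are invented names for illustration only.

#include <linux/netdevice.h>

struct example_hdr {
	__be16 len;
	__be16 proto;
};

static struct sk_buff **example_gro_receive(struct sk_buff **head,
					    struct sk_buff *skb)
{
	struct example_hdr *hdr;
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(*hdr);

	/* Fast path: the header usually sits entirely inside frag0. */
	hdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		/* Not enough data in frag0; fall back to pskb_may_pull(). */
		hdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!hdr))
			return NULL;
	}

	/* ... inspect hdr, update NAPI_GRO_CB(skb)->same_flow, etc. ... */

	skb_gro_pull(skb, sizeof(*hdr));
	return head;
}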
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -1376,14 +1455,23 @@ extern int netif_receive_skb(struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern int dev_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
+extern int napi_skb_finish(int ret, struct sk_buff *skb);
extern int napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern void napi_reuse_skb(struct napi_struct *napi,
struct sk_buff *skb);
-extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi,
- struct napi_gro_fraginfo *info);
-extern int napi_gro_frags(struct napi_struct *napi,
- struct napi_gro_fraginfo *info);
+extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
+extern int napi_frags_finish(struct napi_struct *napi,
+ struct sk_buff *skb, int ret);
+extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
+extern int napi_gro_frags(struct napi_struct *napi);
+
+static inline void napi_free_frags(struct napi_struct *napi)
+{
+ kfree_skb(napi->skb);
+ napi->skb = NULL;
+}
+
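For drivers that receive into pages, the napi_get_frags()/napi_gro_frags() pair replaces the old napi_gro_fraginfo interface. A minimal editor's sketch, assuming a hypothetical driver that has just DMA'd len bytes into page at offset (all names invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;		/* out of memory: drop this frame */

	/* Attach the page as the next fragment of the reused GRO skb. */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	/* Hand the frame to GRO; it pulls headers via the frag0 helpers. */
	napi_gro_frags(napi);
}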
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
@@ -1450,6 +1538,8 @@ static inline int netif_carrier_ok(const struct net_device *dev)
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
+extern unsigned long dev_trans_start(struct net_device *dev);
+
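As the comment on trans_start notes, per-queue timestamps are preferred on fast devices; dev_trans_start() gives callers the most recent of them. An editor's sketch of a stall check built on it (example_tx_stalled() is an invented name; this is not code from the patch):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

static bool example_tx_stalled(struct net_device *dev)
{
	/* Compare against the newest per-queue trans_start. */
	return netif_carrier_ok(dev) &&
	       time_after(jiffies,
			  dev_trans_start(dev) + dev->watchdog_timeo);
}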
extern void __netdev_watchdog_up(struct net_device *dev);
extern void netif_carrier_on(struct net_device *dev);
@@ -1575,56 +1665,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
return (1 << debug_value) - 1;
}
-/* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct napi_struct *napi)
-{
- return napi_schedule_prep(napi);
-}
-
-/* Add interface to tail of rx poll list. This assumes that _prep has
- * already been called and returned 1.
- */
-static inline void __netif_rx_schedule(struct napi_struct *napi)
-{
- __napi_schedule(napi);
-}
-
-/* Try to reschedule poll. Called by irq handler. */
-
-static inline void netif_rx_schedule(struct napi_struct *napi)
-{
- if (netif_rx_schedule_prep(napi))
- __netif_rx_schedule(napi);
-}
-
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
-static inline int netif_rx_reschedule(struct napi_struct *napi)
-{
- if (napi_schedule_prep(napi)) {
- __netif_rx_schedule(napi);
- return 1;
- }
- return 0;
-}
-
-/* same as netif_rx_complete, except that local_irq_save(flags)
- * has already been issued
- */
-static inline void __netif_rx_complete(struct napi_struct *napi)
-{
- __napi_complete(napi);
-}
-
-/* Remove interface from poll list: it must be in the poll list
- * on current cpu. This primitive is called by dev->poll(), when
- * it completes the work. The device cannot be out of poll list at this
- * moment, it is BUG().
- */
-static inline void netif_rx_complete(struct napi_struct *napi)
-{
- napi_complete(napi);
-}
-
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
@@ -1657,6 +1697,12 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
spin_unlock_bh(&txq->_xmit_lock);
}
+static inline void txq_trans_update(struct netdev_queue *txq)
+{
+ if (txq->xmit_lock_owner != -1)
+ txq->trans_start = jiffies;
+}
+
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
@@ -1764,6 +1810,13 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
spin_unlock_bh(&dev->addr_list_lock);
}
+/*
+ * dev_addrs walker. Should be used only for read access. Call with
+ * rcu_read_lock held.
+ */
+#define for_each_dev_addr(dev, ha) \
+ list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
+
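An editor's sketch of the walker in use; it assumes the list elements are struct netdev_hw_addr with addr/type fields (added elsewhere in this series), and example_dump_addrs() is an invented name:

#include <linux/netdevice.h>
#include <linux/rculist.h>

static void example_dump_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	rcu_read_lock();	/* read access only, as required */
	for_each_dev_addr(dev, ha)
		printk(KERN_DEBUG "%s: hw addr %pM (type %u)\n",
		       dev->name, ha->addr, ha->type);
	rcu_read_unlock();
}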
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
@@ -1776,11 +1829,24 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
+
+/* Functions used for device addresses handling */
+extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type);
+extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type);
+extern int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type);
+extern int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type);
+
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
-extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
-extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
+extern int dev_unicast_delete(struct net_device *dev, void *addr);
+extern int dev_unicast_add(struct net_device *dev, void *addr);
extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
@@ -1842,15 +1908,14 @@ static inline int net_gso_ok(int features, int gso_type)
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
- return net_gso_ok(features, skb_shinfo(skb)->gso_type);
+ return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
+ (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
}
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
- (skb_shinfo(skb)->frag_list &&
- !(dev->features & NETIF_F_FRAGLIST)) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
@@ -1860,6 +1925,16 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
+ struct net_device *master)
+{
+ if (skb->pkt_type == PACKET_HOST) {
+ u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
+
+ memcpy(dest, master->dev_addr, ETH_ALEN);
+ }
+}
+
/* On bonding slaves other than the currently active slave, suppress
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
* ARP on active-backup slaves with arp_validate enabled.
@@ -1873,9 +1948,17 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
if (master->priv_flags & IFF_MASTER_ARPMON)
dev->last_rx = jiffies;
+ if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
+ /* Do address unmangle. The local destination address
+ * will be always the one master has. Provides the right
+ * functionality in a bridge.
+ */
+ skb_bond_set_mac_by_master(skb, master);
+ }
+
if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
- skb->protocol == __constant_htons(ETH_P_ARP))
+ skb->protocol == __cpu_to_be16(ETH_P_ARP))
return 0;
if (master->priv_flags & IFF_MASTER_ALB) {
@@ -1884,7 +1967,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
return 0;
}
if (master->priv_flags & IFF_MASTER_8023AD &&
- skb->protocol == __constant_htons(ETH_P_SLOW))
+ skb->protocol == __cpu_to_be16(ETH_P_SLOW))
return 0;
return 1;
@@ -1894,6 +1977,28 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
}
extern struct pernet_operations __net_initdata loopback_net_ops;
+
+static inline int dev_ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+ return dev->ethtool_ops->get_settings(dev, cmd);
+}
+
+static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
+{
+ if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
+ return 0;
+ return dev->ethtool_ops->get_rx_csum(dev);
+}
+
+static inline u32 dev_ethtool_get_flags(struct net_device *dev)
+{
+ if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
+ return 0;
+ return dev->ethtool_ops->get_flags(dev);
+}
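These wrappers mainly serve bonding-style code that has to interrogate slave devices without open-coding the ethtool_ops checks. A brief editor's sketch (example_slave_speed() is invented; struct ethtool_cmd comes from <linux/ethtool.h>):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_slave_speed(struct net_device *slave)
{
	struct ethtool_cmd ecmd;
	int err = dev_ethtool_get_settings(slave, &ecmd);

	if (err < 0)
		return err;	/* e.g. -EOPNOTSUPP if no ethtool_ops */

	return ecmd.speed;	/* e.g. SPEED_1000 */
}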
#endif /* __KERNEL__ */
-#endif /* _LINUX_DEV_H */
+#endif /* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 5a8af875bce..2aea50399c0 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -7,16 +7,21 @@ header-y += xt_CLASSIFY.h
header-y += xt_CONNMARK.h
header-y += xt_CONNSECMARK.h
header-y += xt_DSCP.h
+header-y += xt_LED.h
header-y += xt_MARK.h
header-y += xt_NFLOG.h
header-y += xt_NFQUEUE.h
header-y += xt_RATEEST.h
header-y += xt_SECMARK.h
header-y += xt_TCPMSS.h
+header-y += xt_TCPOPTSTRIP.h
+header-y += xt_TPROXY.h
header-y += xt_comment.h
header-y += xt_connbytes.h
+header-y += xt_connlimit.h
header-y += xt_connmark.h
header-y += xt_conntrack.h
+header-y += xt_cluster.h
header-y += xt_dccp.h
header-y += xt_dscp.h
header-y += xt_esp.h
@@ -28,8 +33,10 @@ header-y += xt_limit.h
header-y += xt_mac.h
header-y += xt_mark.h
header-y += xt_multiport.h
+header-y += xt_osf.h
header-y += xt_owner.h
header-y += xt_pkttype.h
+header-y += xt_quota.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
@@ -39,6 +46,8 @@ header-y += xt_statistic.h
header-y += xt_string.h
header-y += xt_tcpmss.h
header-y += xt_tcpudp.h
+header-y += xt_time.h
+header-y += xt_u32.h
unifdef-y += nf_conntrack_common.h
unifdef-y += nf_conntrack_ftp.h
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 885cbe28226..a8248ee422b 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -75,75 +75,6 @@ enum ip_conntrack_status {
IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
};
-/* Connection tracking event bits */
-enum ip_conntrack_events
-{
- /* New conntrack */
- IPCT_NEW_BIT = 0,
- IPCT_NEW = (1 << IPCT_NEW_BIT),
-
- /* Expected connection */
- IPCT_RELATED_BIT = 1,
- IPCT_RELATED = (1 << IPCT_RELATED_BIT),
-
- /* Destroyed conntrack */
- IPCT_DESTROY_BIT = 2,
- IPCT_DESTROY = (1 << IPCT_DESTROY_BIT),
-
- /* Timer has been refreshed */
- IPCT_REFRESH_BIT = 3,
- IPCT_REFRESH = (1 << IPCT_REFRESH_BIT),
-
- /* Status has changed */
- IPCT_STATUS_BIT = 4,
- IPCT_STATUS = (1 << IPCT_STATUS_BIT),
-
- /* Update of protocol info */
- IPCT_PROTOINFO_BIT = 5,
- IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT),
-
- /* Volatile protocol info */
- IPCT_PROTOINFO_VOLATILE_BIT = 6,
- IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT),
-
- /* New helper for conntrack */
- IPCT_HELPER_BIT = 7,
- IPCT_HELPER = (1 << IPCT_HELPER_BIT),
-
- /* Update of helper info */
- IPCT_HELPINFO_BIT = 8,
- IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT),
-
- /* Volatile helper info */
- IPCT_HELPINFO_VOLATILE_BIT = 9,
- IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT),
-
- /* NAT info */
- IPCT_NATINFO_BIT = 10,
- IPCT_NATINFO = (1 << IPCT_NATINFO_BIT),
-
- /* Counter highest bit has been set, unused */
- IPCT_COUNTER_FILLING_BIT = 11,
- IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT),
-
- /* Mark is set */
- IPCT_MARK_BIT = 12,
- IPCT_MARK = (1 << IPCT_MARK_BIT),
-
- /* NAT sequence adjustment */
- IPCT_NATSEQADJ_BIT = 13,
- IPCT_NATSEQADJ = (1 << IPCT_NATSEQADJ_BIT),
-
- /* Secmark is set */
- IPCT_SECMARK_BIT = 14,
- IPCT_SECMARK = (1 << IPCT_SECMARK_BIT),
-};
-
-enum ip_conntrack_expect_events {
- IPEXP_NEW_BIT = 0,
- IPEXP_NEW = (1 << IPEXP_NEW_BIT),
-};
-
#ifdef __KERNEL__
struct ip_conntrack_stat
{
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
index a049df4f223..4352feed237 100644
--- a/include/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -2,6 +2,8 @@
#define _NF_CONNTRACK_TCP_H
/* TCP tracking. */
+#include <linux/types.h>
+
/* This is exposed to userspace (ctnetlink) */
enum tcp_conntrack {
TCP_CONNTRACK_NONE,
@@ -13,7 +15,8 @@ enum tcp_conntrack {
TCP_CONNTRACK_LAST_ACK,
TCP_CONNTRACK_TIME_WAIT,
TCP_CONNTRACK_CLOSE,
- TCP_CONNTRACK_LISTEN,
+ TCP_CONNTRACK_LISTEN, /* obsolete */
+#define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN
TCP_CONNTRACK_MAX,
TCP_CONNTRACK_IGNORE
};
@@ -33,9 +36,12 @@ enum tcp_conntrack {
/* Has unacknowledged data */
#define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10
+/* The field td_maxack has been set */
+#define IP_CT_TCP_FLAG_MAXACK_SET 0x20
+
struct nf_ct_tcp_flags {
- u_int8_t flags;
- u_int8_t mask;
+ __u8 flags;
+ __u8 mask;
};
#ifdef __KERNEL__
@@ -44,6 +50,7 @@ struct ip_ct_tcp_state {
u_int32_t td_end; /* max of seq + len */
u_int32_t td_maxend; /* max of ack + max(win, 1) */
u_int32_t td_maxwin; /* max(win) */
+ u_int32_t td_maxack; /* max of ack */
u_int8_t td_scale; /* window scale factor */
u_int8_t flags; /* per direction options */
};
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 7d8e0455cca..bff4d5741d9 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -25,8 +25,8 @@ enum nfnetlink_groups {
/* General form of address family dependent message.
*/
struct nfgenmsg {
- u_int8_t nfgen_family; /* AF_xxx */
- u_int8_t version; /* nfnetlink version */
+ __u8 nfgen_family; /* AF_xxx */
+ __u8 version; /* nfnetlink version */
__be16 res_id; /* resource id */
};
@@ -46,7 +46,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_CTNETLINK_EXP 2
#define NFNL_SUBSYS_QUEUE 3
#define NFNL_SUBSYS_ULOG 4
-#define NFNL_SUBSYS_COUNT 5
+#define NFNL_SUBSYS_OSF 5
+#define NFNL_SUBSYS_COUNT 6
#ifdef __KERNEL__
@@ -75,7 +76,8 @@ extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
extern int nfnetlink_has_listeners(unsigned int group);
extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
- int echo);
+ int echo, gfp_t flags);
+extern void nfnetlink_set_err(u32 pid, u32 group, int error);
extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
extern void nfnl_lock(void);
diff --git a/include/linux/netfilter/nfnetlink_compat.h b/include/linux/netfilter/nfnetlink_compat.h
index e1451760c9c..eda55cabcee 100644
--- a/include/linux/netfilter/nfnetlink_compat.h
+++ b/include/linux/netfilter/nfnetlink_compat.h
@@ -1,5 +1,8 @@
#ifndef _NFNETLINK_COMPAT_H
#define _NFNETLINK_COMPAT_H
+
+#include <linux/types.h>
+
#ifndef __KERNEL__
/* Old nfnetlink macros for userspace */
@@ -20,8 +23,8 @@
struct nfattr
{
- u_int16_t nfa_len;
- u_int16_t nfa_type; /* we use 15 bits for the type, and the highest
+ __u16 nfa_len;
+ __u16 nfa_type; /* we use 15 bits for the type, and the highest
* bit to indicate whether the payload is nested */
};
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 29fe9ea1d34..ed4ef8d0b11 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -100,6 +100,8 @@ enum ctattr_protoinfo_tcp {
enum ctattr_protoinfo_dccp {
CTA_PROTOINFO_DCCP_UNSPEC,
CTA_PROTOINFO_DCCP_STATE,
+ CTA_PROTOINFO_DCCP_ROLE,
+ CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
__CTA_PROTOINFO_DCCP_MAX,
};
#define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1)
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index f661731f3cb..d3bab7a2c9b 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -17,14 +17,14 @@ enum nfulnl_msg_types {
struct nfulnl_msg_packet_hdr {
__be16 hw_protocol; /* hw protocol (network order) */
- u_int8_t hook; /* netfilter hook */
- u_int8_t _pad;
+ __u8 hook; /* netfilter hook */
+ __u8 _pad;
};
struct nfulnl_msg_packet_hw {
__be16 hw_addrlen;
- u_int16_t _pad;
- u_int8_t hw_addr[8];
+ __u16 _pad;
+ __u8 hw_addr[8];
};
struct nfulnl_msg_packet_timestamp {
@@ -35,12 +35,12 @@ struct nfulnl_msg_packet_timestamp {
enum nfulnl_attr_type {
NFULA_UNSPEC,
NFULA_PACKET_HDR,
- NFULA_MARK, /* u_int32_t nfmark */
+ NFULA_MARK, /* __u32 nfmark */
NFULA_TIMESTAMP, /* nfulnl_msg_packet_timestamp */
- NFULA_IFINDEX_INDEV, /* u_int32_t ifindex */
- NFULA_IFINDEX_OUTDEV, /* u_int32_t ifindex */
- NFULA_IFINDEX_PHYSINDEV, /* u_int32_t ifindex */
- NFULA_IFINDEX_PHYSOUTDEV, /* u_int32_t ifindex */
+ NFULA_IFINDEX_INDEV, /* __u32 ifindex */
+ NFULA_IFINDEX_OUTDEV, /* __u32 ifindex */
+ NFULA_IFINDEX_PHYSINDEV, /* __u32 ifindex */
+ NFULA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFULA_HWADDR, /* nfulnl_msg_packet_hw */
NFULA_PAYLOAD, /* opaque data payload */
NFULA_PREFIX, /* string prefix */
@@ -65,23 +65,23 @@ enum nfulnl_msg_config_cmds {
};
struct nfulnl_msg_config_cmd {
- u_int8_t command; /* nfulnl_msg_config_cmds */
+ __u8 command; /* nfulnl_msg_config_cmds */
} __attribute__ ((packed));
struct nfulnl_msg_config_mode {
__be32 copy_range;
- u_int8_t copy_mode;
- u_int8_t _pad;
+ __u8 copy_mode;
+ __u8 _pad;
} __attribute__ ((packed));
enum nfulnl_attr_config {
NFULA_CFG_UNSPEC,
NFULA_CFG_CMD, /* nfulnl_msg_config_cmd */
NFULA_CFG_MODE, /* nfulnl_msg_config_mode */
- NFULA_CFG_NLBUFSIZ, /* u_int32_t buffer size */
- NFULA_CFG_TIMEOUT, /* u_int32_t in 1/100 s */
- NFULA_CFG_QTHRESH, /* u_int32_t */
- NFULA_CFG_FLAGS, /* u_int16_t */
+ NFULA_CFG_NLBUFSIZ, /* __u32 buffer size */
+ NFULA_CFG_TIMEOUT, /* __u32 in 1/100 s */
+ NFULA_CFG_QTHRESH, /* __u32 */
+ NFULA_CFG_FLAGS, /* __u16 */
__NFULA_CFG_MAX
};
#define NFULA_CFG_MAX (__NFULA_CFG_MAX -1)
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 83e789633e3..2455fe5f4e0 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -15,13 +15,13 @@ enum nfqnl_msg_types {
struct nfqnl_msg_packet_hdr {
__be32 packet_id; /* unique ID of packet in queue */
__be16 hw_protocol; /* hw protocol (network order) */
- u_int8_t hook; /* netfilter hook */
+ __u8 hook; /* netfilter hook */
} __attribute__ ((packed));
struct nfqnl_msg_packet_hw {
__be16 hw_addrlen;
- u_int16_t _pad;
- u_int8_t hw_addr[8];
+ __u16 _pad;
+ __u8 hw_addr[8];
};
struct nfqnl_msg_packet_timestamp {
@@ -33,12 +33,12 @@ enum nfqnl_attr_type {
NFQA_UNSPEC,
NFQA_PACKET_HDR,
NFQA_VERDICT_HDR, /* nfqnl_msg_verdict_hrd */
- NFQA_MARK, /* u_int32_t nfmark */
+ NFQA_MARK, /* __u32 nfmark */
NFQA_TIMESTAMP, /* nfqnl_msg_packet_timestamp */
- NFQA_IFINDEX_INDEV, /* u_int32_t ifindex */
- NFQA_IFINDEX_OUTDEV, /* u_int32_t ifindex */
- NFQA_IFINDEX_PHYSINDEV, /* u_int32_t ifindex */
- NFQA_IFINDEX_PHYSOUTDEV, /* u_int32_t ifindex */
+ NFQA_IFINDEX_INDEV, /* __u32 ifindex */
+ NFQA_IFINDEX_OUTDEV, /* __u32 ifindex */
+ NFQA_IFINDEX_PHYSINDEV, /* __u32 ifindex */
+ NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFQA_HWADDR, /* nfqnl_msg_packet_hw */
NFQA_PAYLOAD, /* opaque data payload */
@@ -61,8 +61,8 @@ enum nfqnl_msg_config_cmds {
};
struct nfqnl_msg_config_cmd {
- u_int8_t command; /* nfqnl_msg_config_cmds */
- u_int8_t _pad;
+ __u8 command; /* nfqnl_msg_config_cmds */
+ __u8 _pad;
__be16 pf; /* AF_xxx for PF_[UN]BIND */
};
@@ -74,7 +74,7 @@ enum nfqnl_config_mode {
struct nfqnl_msg_config_params {
__be32 copy_range;
- u_int8_t copy_mode; /* enum nfqnl_config_mode */
+ __u8 copy_mode; /* enum nfqnl_config_mode */
} __attribute__ ((packed));
@@ -82,7 +82,7 @@ enum nfqnl_attr_config {
NFQA_CFG_UNSPEC,
NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */
NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */
- NFQA_CFG_QUEUE_MAXLEN, /* u_int32_t */
+ NFQA_CFG_QUEUE_MAXLEN, /* __u32 */
__NFQA_CFG_MAX
};
#define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c7ee8744d26..1030b759389 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -1,6 +1,8 @@
#ifndef _X_TABLES_H
#define _X_TABLES_H
+#include <linux/types.h>
+
#define XT_FUNCTION_MAXNAMELEN 30
#define XT_TABLE_MAXNAMELEN 32
@@ -8,22 +10,22 @@ struct xt_entry_match
{
union {
struct {
- u_int16_t match_size;
+ __u16 match_size;
/* Used by userspace */
char name[XT_FUNCTION_MAXNAMELEN-1];
- u_int8_t revision;
+ __u8 revision;
} user;
struct {
- u_int16_t match_size;
+ __u16 match_size;
/* Used inside the kernel */
struct xt_match *match;
} kernel;
/* Total length */
- u_int16_t match_size;
+ __u16 match_size;
} u;
unsigned char data[0];
@@ -33,22 +35,22 @@ struct xt_entry_target
{
union {
struct {
- u_int16_t target_size;
+ __u16 target_size;
/* Used by userspace */
char name[XT_FUNCTION_MAXNAMELEN-1];
- u_int8_t revision;
+ __u8 revision;
} user;
struct {
- u_int16_t target_size;
+ __u16 target_size;
/* Used inside the kernel */
struct xt_target *target;
} kernel;
/* Total length */
- u_int16_t target_size;
+ __u16 target_size;
} u;
unsigned char data[0];
@@ -74,7 +76,7 @@ struct xt_get_revision
{
char name[XT_FUNCTION_MAXNAMELEN-1];
- u_int8_t revision;
+ __u8 revision;
};
/* CONTINUE verdict for targets */
@@ -90,10 +92,10 @@ struct xt_get_revision
*/
struct _xt_align
{
- u_int8_t u8;
- u_int16_t u16;
- u_int32_t u32;
- u_int64_t u64;
+ __u8 u8;
+ __u16 u16;
+ __u32 u32;
+ __u64 u64;
};
#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \
@@ -109,7 +111,7 @@ struct _xt_align
struct xt_counters
{
- u_int64_t pcnt, bcnt; /* Packet and byte counters */
+ __u64 pcnt, bcnt; /* Packet and byte counters */
};
/* The argument to IPT_SO_ADD_COUNTERS. */
@@ -182,9 +184,10 @@ struct xt_counters_info
* @matchinfo: per-match data
* @fragoff: packet is a fragment, this is the data offset
* @thoff: position of transport header relative to skb->data
- * @hotdrop: drop packet if we had inspection problems
+ * @hook: hook number given packet came from
* @family: Actual NFPROTO_* through which the function is invoked
* (helpful when match->family == NFPROTO_UNSPEC)
+ * @hotdrop: drop packet if we had inspection problems
*/
struct xt_match_param {
const struct net_device *in, *out;
@@ -192,8 +195,9 @@ struct xt_match_param {
const void *matchinfo;
int fragoff;
unsigned int thoff;
- bool *hotdrop;
+ unsigned int hooknum;
u_int8_t family;
+ bool *hotdrop;
};
/**
@@ -349,23 +353,19 @@ struct xt_table
{
struct list_head list;
- /* A unique name... */
- const char name[XT_TABLE_MAXNAMELEN];
-
/* What hooks you will enter on */
unsigned int valid_hooks;
- /* Lock for the curtain */
- rwlock_t lock;
-
/* Man behind the curtain... */
- //struct ip6t_table_info *private;
- void *private;
+ struct xt_table_info *private;
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
u_int8_t af; /* address/protocol family */
+
+ /* A unique name... */
+ const char name[XT_TABLE_MAXNAMELEN];
};
#include <linux/netfilter_ipv4.h>
@@ -386,7 +386,7 @@ struct xt_table_info
/* ipt_entry tables: one per CPU */
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
- char *entries[1];
+ void *entries[1];
};
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
@@ -434,6 +434,97 @@ extern void xt_proto_fini(struct net *net, u_int8_t af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
+/*
+ * Per-CPU spinlock associated with per-cpu table entries, and
+ * with a counter for the "reading" side that allows a recursive
+ * reader to avoid taking the lock and deadlocking.
+ *
+ * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
+ * It needs to ensure that the rules are not being changed while the packet
+ * is being processed. In some cases, the read lock will be acquired
+ * twice on the same CPU; this is okay because of the count.
+ *
+ * "writing" is used when reading counters.
+ * During replace any readers that are using the old tables have to complete
+ * before freeing the old table. This is handled by the write locking
+ * necessary for reading the counters.
+ */
+struct xt_info_lock {
+ spinlock_t lock;
+ unsigned char readers;
+};
+DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
+
+/*
+ * Note: we need to ensure that preemption is disabled before acquiring
+ * the per-cpu-variable, so we do it as a two step process rather than
+ * using "spin_lock_bh()".
+ *
+ * We _also_ need to disable bottom half processing before updating our
+ * nesting count, to make sure that the only kind of re-entrancy is this
+ * code being called by itself: since the count+lock is not an atomic
+ * operation, we can allow no races.
+ *
+ * _Only_ that special combination of being per-cpu and never getting
+ * re-entered asynchronously means that the count is safe.
+ */
+static inline void xt_info_rdlock_bh(void)
+{
+ struct xt_info_lock *lock;
+
+ local_bh_disable();
+ lock = &__get_cpu_var(xt_info_locks);
+ if (likely(!lock->readers++))
+ spin_lock(&lock->lock);
+}
+
+static inline void xt_info_rdunlock_bh(void)
+{
+ struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
+
+ if (likely(!--lock->readers))
+ spin_unlock(&lock->lock);
+ local_bh_enable();
+}
+
+/*
+ * The "writer" side needs to get exclusive access to the lock,
+ * regardless of readers. This must be called with bottom half
+ * processing (and thus also preemption) disabled.
+ */
+static inline void xt_info_wrlock(unsigned int cpu)
+{
+ spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+}
+
+static inline void xt_info_wrunlock(unsigned int cpu)
+{
+ spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+}
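An editor's sketch of the intended calling pattern on the packet path; the table-walk body is elided, example_do_table() is an invented name, and NF_DROP comes from <linux/netfilter.h>:

#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>

static unsigned int example_do_table(struct sk_buff *skb,
				     struct xt_table *table)
{
	const struct xt_table_info *private;
	unsigned int verdict = NF_DROP;

	xt_info_rdlock_bh();		/* recursion-safe, disables BH */
	private = table->private;
	/* ... walk private->entries[smp_processor_id()] for skb,
	 *     setting verdict from the matching rule ... */
	xt_info_rdunlock_bh();

	return verdict;
}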
+
+/*
+ * This helper is performance critical and must be inlined
+ */
+static inline unsigned long ifname_compare_aligned(const char *_a,
+ const char *_b,
+ const char *_mask)
+{
+ const unsigned long *a = (const unsigned long *)_a;
+ const unsigned long *b = (const unsigned long *)_b;
+ const unsigned long *mask = (const unsigned long *)_mask;
+ unsigned long ret;
+
+ ret = (a[0] ^ b[0]) & mask[0];
+ if (IFNAMSIZ > sizeof(unsigned long))
+ ret |= (a[1] ^ b[1]) & mask[1];
+ if (IFNAMSIZ > 2 * sizeof(unsigned long))
+ ret |= (a[2] ^ b[2]) & mask[2];
+ if (IFNAMSIZ > 3 * sizeof(unsigned long))
+ ret |= (a[3] ^ b[3]) & mask[3];
+ BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+ return ret;
+}
+
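An editor's sketch of a typical caller, matching a packet's input device against a wildcarded name the way the interface-based matches do (struct xt_example_info and example_iface_match() are invented for illustration):

#include <linux/if.h>
#include <linux/netdevice.h>

struct xt_example_info {
	char iface[IFNAMSIZ];
	char iface_mask[IFNAMSIZ];	/* 0xff where the name must match */
};

static bool example_iface_match(const struct net_device *in,
				const struct xt_example_info *info)
{
	/* Zero result means every unmasked byte of the names is equal. */
	return ifname_compare_aligned(in->name, info->iface,
				      info->iface_mask) == 0;
}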
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter/xt_CLASSIFY.h b/include/linux/netfilter/xt_CLASSIFY.h
index 58111355255..a813bf14dd6 100644
--- a/include/linux/netfilter/xt_CLASSIFY.h
+++ b/include/linux/netfilter/xt_CLASSIFY.h
@@ -1,8 +1,10 @@
#ifndef _XT_CLASSIFY_H
#define _XT_CLASSIFY_H
+#include <linux/types.h>
+
struct xt_classify_target_info {
- u_int32_t priority;
+ __u32 priority;
};
#endif /*_XT_CLASSIFY_H */
diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h
index 4e58ba43c28..7635c8ffdad 100644
--- a/include/linux/netfilter/xt_CONNMARK.h
+++ b/include/linux/netfilter/xt_CONNMARK.h
@@ -1,6 +1,8 @@
#ifndef _XT_CONNMARK_H_target
#define _XT_CONNMARK_H_target
+#include <linux/types.h>
+
/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
* by Henrik Nordstrom <hno@marasystems.com>
*
@@ -19,12 +21,12 @@ enum {
struct xt_connmark_target_info {
unsigned long mark;
unsigned long mask;
- u_int8_t mode;
+ __u8 mode;
};
struct xt_connmark_tginfo1 {
- u_int32_t ctmark, ctmask, nfmask;
- u_int8_t mode;
+ __u32 ctmark, ctmask, nfmask;
+ __u8 mode;
};
#endif /*_XT_CONNMARK_H_target*/
diff --git a/include/linux/netfilter/xt_CONNSECMARK.h b/include/linux/netfilter/xt_CONNSECMARK.h
index c6bd75469ba..b973ff80fa1 100644
--- a/include/linux/netfilter/xt_CONNSECMARK.h
+++ b/include/linux/netfilter/xt_CONNSECMARK.h
@@ -1,13 +1,15 @@
#ifndef _XT_CONNSECMARK_H_target
#define _XT_CONNSECMARK_H_target
+#include <linux/types.h>
+
enum {
CONNSECMARK_SAVE = 1,
CONNSECMARK_RESTORE,
};
struct xt_connsecmark_target_info {
- u_int8_t mode;
+ __u8 mode;
};
#endif /*_XT_CONNSECMARK_H_target */
diff --git a/include/linux/netfilter/xt_DSCP.h b/include/linux/netfilter/xt_DSCP.h
index 14da1968e2c..648e0b3bed2 100644
--- a/include/linux/netfilter/xt_DSCP.h
+++ b/include/linux/netfilter/xt_DSCP.h
@@ -11,15 +11,16 @@
#ifndef _XT_DSCP_TARGET_H
#define _XT_DSCP_TARGET_H
#include <linux/netfilter/xt_dscp.h>
+#include <linux/types.h>
/* target info */
struct xt_DSCP_info {
- u_int8_t dscp;
+ __u8 dscp;
};
struct xt_tos_target_info {
- u_int8_t tos_value;
- u_int8_t tos_mask;
+ __u8 tos_value;
+ __u8 tos_mask;
};
#endif /* _XT_DSCP_TARGET_H */
diff --git a/include/linux/netfilter/xt_LED.h b/include/linux/netfilter/xt_LED.h
new file mode 100644
index 00000000000..f5509e7524d
--- /dev/null
+++ b/include/linux/netfilter/xt_LED.h
@@ -0,0 +1,15 @@
+#ifndef _XT_LED_H
+#define _XT_LED_H
+
+#include <linux/types.h>
+
+struct xt_led_info {
+ char id[27]; /* Unique ID for this trigger in the LED class */
+ __u8 always_blink; /* Blink even if the LED is already on */
+ __u32 delay; /* Delay until LED is switched off after trigger */
+
+ /* Kernel data used in the module */
+ void *internal_data __attribute__((aligned(8)));
+};
+
+#endif /* _XT_LED_H */
diff --git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h
index 778b278fd9f..028304bcc0b 100644
--- a/include/linux/netfilter/xt_MARK.h
+++ b/include/linux/netfilter/xt_MARK.h
@@ -1,6 +1,8 @@
#ifndef _XT_MARK_H_target
#define _XT_MARK_H_target
+#include <linux/types.h>
+
/* Version 0 */
struct xt_mark_target_info {
unsigned long mark;
@@ -15,11 +17,11 @@ enum {
struct xt_mark_target_info_v1 {
unsigned long mark;
- u_int8_t mode;
+ __u8 mode;
};
struct xt_mark_tginfo2 {
- u_int32_t mark, mask;
+ __u32 mark, mask;
};
#endif /*_XT_MARK_H_target */
diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h
index 4b36aeb46a1..87b58311ce6 100644
--- a/include/linux/netfilter/xt_NFLOG.h
+++ b/include/linux/netfilter/xt_NFLOG.h
@@ -1,17 +1,19 @@
#ifndef _XT_NFLOG_TARGET
#define _XT_NFLOG_TARGET
+#include <linux/types.h>
+
#define XT_NFLOG_DEFAULT_GROUP 0x1
#define XT_NFLOG_DEFAULT_THRESHOLD 0
#define XT_NFLOG_MASK 0x0
struct xt_nflog_info {
- u_int32_t len;
- u_int16_t group;
- u_int16_t threshold;
- u_int16_t flags;
- u_int16_t pad;
+ __u32 len;
+ __u16 group;
+ __u16 threshold;
+ __u16 flags;
+ __u16 pad;
char prefix[64];
};
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
index 9a9af79f74d..2584f4a777d 100644
--- a/include/linux/netfilter/xt_NFQUEUE.h
+++ b/include/linux/netfilter/xt_NFQUEUE.h
@@ -8,9 +8,16 @@
#ifndef _XT_NFQ_TARGET_H
#define _XT_NFQ_TARGET_H
+#include <linux/types.h>
+
/* target info */
struct xt_NFQ_info {
- u_int16_t queuenum;
+ __u16 queuenum;
+};
+
+struct xt_NFQ_info_v1 {
+ __u16 queuenum;
+ __u16 queues_total;
};
#endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_RATEEST.h b/include/linux/netfilter/xt_RATEEST.h
index f79e3133cbe..6605e20ad8c 100644
--- a/include/linux/netfilter/xt_RATEEST.h
+++ b/include/linux/netfilter/xt_RATEEST.h
@@ -1,10 +1,12 @@
#ifndef _XT_RATEEST_TARGET_H
#define _XT_RATEEST_TARGET_H
+#include <linux/types.h>
+
struct xt_rateest_target_info {
char name[IFNAMSIZ];
- int8_t interval;
- u_int8_t ewma_log;
+ __s8 interval;
+ __u8 ewma_log;
/* Used internally by the kernel */
struct xt_rateest *est __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h
index c53fbffa997..6fcd3448b18 100644
--- a/include/linux/netfilter/xt_SECMARK.h
+++ b/include/linux/netfilter/xt_SECMARK.h
@@ -1,6 +1,8 @@
#ifndef _XT_SECMARK_H_target
#define _XT_SECMARK_H_target
+#include <linux/types.h>
+
/*
* This is intended for use by various security subsystems (but not
* at the same time).
@@ -12,12 +14,12 @@
#define SECMARK_SELCTX_MAX 256
struct xt_secmark_target_selinux_info {
- u_int32_t selsid;
+ __u32 selsid;
char selctx[SECMARK_SELCTX_MAX];
};
struct xt_secmark_target_info {
- u_int8_t mode;
+ __u8 mode;
union {
struct xt_secmark_target_selinux_info sel;
} u;
diff --git a/include/linux/netfilter/xt_TCPMSS.h b/include/linux/netfilter/xt_TCPMSS.h
index 53a292cd47f..9a6960afc13 100644
--- a/include/linux/netfilter/xt_TCPMSS.h
+++ b/include/linux/netfilter/xt_TCPMSS.h
@@ -1,8 +1,10 @@
#ifndef _XT_TCPMSS_H
#define _XT_TCPMSS_H
+#include <linux/types.h>
+
struct xt_tcpmss_info {
- u_int16_t mss;
+ __u16 mss;
};
#define XT_TCPMSS_CLAMP_PMTU 0xffff
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h
new file mode 100644
index 00000000000..886682656f0
--- /dev/null
+++ b/include/linux/netfilter/xt_cluster.h
@@ -0,0 +1,17 @@
+#ifndef _XT_CLUSTER_MATCH_H
+#define _XT_CLUSTER_MATCH_H
+
+enum xt_cluster_flags {
+ XT_CLUSTER_F_INV = (1 << 0)
+};
+
+struct xt_cluster_match_info {
+ u_int32_t total_nodes;
+ u_int32_t node_mask;
+ u_int32_t hash_seed;
+ u_int32_t flags;
+};
+
+#define XT_CLUSTER_NODES_MAX 32
+
+#endif /* _XT_CLUSTER_MATCH_H */
diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h
index c022c989754..52bd6153b99 100644
--- a/include/linux/netfilter/xt_connbytes.h
+++ b/include/linux/netfilter/xt_connbytes.h
@@ -1,6 +1,8 @@
#ifndef _XT_CONNBYTES_H
#define _XT_CONNBYTES_H
+#include <linux/types.h>
+
enum xt_connbytes_what {
XT_CONNBYTES_PKTS,
XT_CONNBYTES_BYTES,
@@ -19,7 +21,7 @@ struct xt_connbytes_info
aligned_u64 from; /* count to be matched */
aligned_u64 to; /* count to be matched */
} count;
- u_int8_t what; /* ipt_connbytes_what */
- u_int8_t direction; /* ipt_connbytes_direction */
+ __u8 what; /* ipt_connbytes_what */
+ __u8 direction; /* ipt_connbytes_direction */
};
#endif
diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h
index 359ef86918d..571e266d004 100644
--- a/include/linux/netfilter/xt_connmark.h
+++ b/include/linux/netfilter/xt_connmark.h
@@ -1,6 +1,8 @@
#ifndef _XT_CONNMARK_H
#define _XT_CONNMARK_H
+#include <linux/types.h>
+
/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
* by Henrik Nordstrom <hno@marasystems.com>
*
@@ -12,12 +14,12 @@
struct xt_connmark_info {
unsigned long mark, mask;
- u_int8_t invert;
+ __u8 invert;
};
struct xt_connmark_mtinfo1 {
- u_int32_t mark, mask;
- u_int8_t invert;
+ __u32 mark, mask;
+ __u8 invert;
};
#endif /*_XT_CONNMARK_H*/
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 8f534527539..3430c775194 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -63,9 +63,9 @@ struct xt_conntrack_info
unsigned long expires_min, expires_max;
/* Flags word */
- u_int8_t flags;
+ __u8 flags;
/* Inverse flags */
- u_int8_t invflags;
+ __u8 invflags;
};
struct xt_conntrack_mtinfo1 {
@@ -73,12 +73,12 @@ struct xt_conntrack_mtinfo1 {
union nf_inet_addr origdst_addr, origdst_mask;
union nf_inet_addr replsrc_addr, replsrc_mask;
union nf_inet_addr repldst_addr, repldst_mask;
- u_int32_t expires_min, expires_max;
- u_int16_t l4proto;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
__be16 origsrc_port, origdst_port;
__be16 replsrc_port, repldst_port;
- u_int16_t match_flags, invert_flags;
- u_int8_t state_mask, status_mask;
+ __u16 match_flags, invert_flags;
+ __u8 state_mask, status_mask;
};
#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_dccp.h b/include/linux/netfilter/xt_dccp.h
index e0221b9d32c..a579e1b6f04 100644
--- a/include/linux/netfilter/xt_dccp.h
+++ b/include/linux/netfilter/xt_dccp.h
@@ -1,6 +1,8 @@
#ifndef _XT_DCCP_H_
#define _XT_DCCP_H_
+#include <linux/types.h>
+
#define XT_DCCP_SRC_PORTS 0x01
#define XT_DCCP_DEST_PORTS 0x02
#define XT_DCCP_TYPE 0x04
@@ -9,14 +11,14 @@
#define XT_DCCP_VALID_FLAGS 0x0f
struct xt_dccp_info {
- u_int16_t dpts[2]; /* Min, Max */
- u_int16_t spts[2]; /* Min, Max */
+ __u16 dpts[2]; /* Min, Max */
+ __u16 spts[2]; /* Min, Max */
- u_int16_t flags;
- u_int16_t invflags;
+ __u16 flags;
+ __u16 invflags;
- u_int16_t typemask;
- u_int8_t option;
+ __u16 typemask;
+ __u8 option;
};
#endif /* _XT_DCCP_H_ */
diff --git a/include/linux/netfilter/xt_dscp.h b/include/linux/netfilter/xt_dscp.h
index f49bc1a648d..15f8932ad5c 100644
--- a/include/linux/netfilter/xt_dscp.h
+++ b/include/linux/netfilter/xt_dscp.h
@@ -10,20 +10,22 @@
#ifndef _XT_DSCP_H
#define _XT_DSCP_H
+#include <linux/types.h>
+
#define XT_DSCP_MASK 0xfc /* 11111100 */
#define XT_DSCP_SHIFT 2
#define XT_DSCP_MAX 0x3f /* 00111111 */
/* match info */
struct xt_dscp_info {
- u_int8_t dscp;
- u_int8_t invert;
+ __u8 dscp;
+ __u8 invert;
};
struct xt_tos_match_info {
- u_int8_t tos_mask;
- u_int8_t tos_value;
- u_int8_t invert;
+ __u8 tos_mask;
+ __u8 tos_value;
+ __u8 invert;
};
#endif /* _XT_DSCP_H */
diff --git a/include/linux/netfilter/xt_esp.h b/include/linux/netfilter/xt_esp.h
index 9380fb1c27d..ef6fa4747d0 100644
--- a/include/linux/netfilter/xt_esp.h
+++ b/include/linux/netfilter/xt_esp.h
@@ -1,10 +1,12 @@
#ifndef _XT_ESP_H
#define _XT_ESP_H
+#include <linux/types.h>
+
struct xt_esp
{
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "invflags" field in struct xt_esp. */
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
index 51b18d83b47..b1925b5925e 100644
--- a/include/linux/netfilter/xt_hashlimit.h
+++ b/include/linux/netfilter/xt_hashlimit.h
@@ -1,6 +1,8 @@
#ifndef _XT_HASHLIMIT_H
#define _XT_HASHLIMIT_H
+#include <linux/types.h>
+
/* timings are in milliseconds. */
#define XT_HASHLIMIT_SCALE 10000
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
@@ -18,15 +20,15 @@ enum {
};
struct hashlimit_cfg {
- u_int32_t mode; /* bitmask of XT_HASHLIMIT_HASH_* */
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+ __u32 avg; /* Average secs between packets * scale */
+ __u32 burst; /* Period multiplier for upper limit. */
/* user specified */
- u_int32_t size; /* how many buckets */
- u_int32_t max; /* max number of entries */
- u_int32_t gc_interval; /* gc interval */
- u_int32_t expire; /* when do entries expire? */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
};
struct xt_hashlimit_info {
@@ -42,17 +44,17 @@ struct xt_hashlimit_info {
};
struct hashlimit_cfg1 {
- u_int32_t mode; /* bitmask of XT_HASHLIMIT_HASH_* */
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+ __u32 avg; /* Average secs between packets * scale */
+ __u32 burst; /* Period multiplier for upper limit. */
/* user specified */
- u_int32_t size; /* how many buckets */
- u_int32_t max; /* max number of entries */
- u_int32_t gc_interval; /* gc interval */
- u_int32_t expire; /* when do entries expire? */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
- u_int8_t srcmask, dstmask;
+ __u8 srcmask, dstmask;
};
struct xt_hashlimit_mtinfo1 {
diff --git a/include/linux/netfilter/xt_iprange.h b/include/linux/netfilter/xt_iprange.h
index a4299c7d368..c1f21a779a4 100644
--- a/include/linux/netfilter/xt_iprange.h
+++ b/include/linux/netfilter/xt_iprange.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_NETFILTER_XT_IPRANGE_H
#define _LINUX_NETFILTER_XT_IPRANGE_H 1
+#include <linux/types.h>
+
enum {
IPRANGE_SRC = 1 << 0, /* match source IP address */
IPRANGE_DST = 1 << 1, /* match destination IP address */
@@ -11,7 +13,7 @@ enum {
struct xt_iprange_mtinfo {
union nf_inet_addr src_min, src_max;
union nf_inet_addr dst_min, dst_max;
- u_int8_t flags;
+ __u8 flags;
};
#endif /* _LINUX_NETFILTER_XT_IPRANGE_H */
diff --git a/include/linux/netfilter/xt_length.h b/include/linux/netfilter/xt_length.h
index 7c2b439f73f..b82ed7c4b1e 100644
--- a/include/linux/netfilter/xt_length.h
+++ b/include/linux/netfilter/xt_length.h
@@ -1,9 +1,11 @@
#ifndef _XT_LENGTH_H
#define _XT_LENGTH_H
+#include <linux/types.h>
+
struct xt_length_info {
- u_int16_t min, max;
- u_int8_t invert;
+ __u16 min, max;
+ __u8 invert;
};
#endif /*_XT_LENGTH_H*/
diff --git a/include/linux/netfilter/xt_limit.h b/include/linux/netfilter/xt_limit.h
index b3ce65375ec..bb47fc4d2ad 100644
--- a/include/linux/netfilter/xt_limit.h
+++ b/include/linux/netfilter/xt_limit.h
@@ -1,21 +1,24 @@
#ifndef _XT_RATE_H
#define _XT_RATE_H
+#include <linux/types.h>
+
/* timings are in milliseconds. */
#define XT_LIMIT_SCALE 10000
+struct xt_limit_priv;
+
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
seconds, or one every 59 hours. */
struct xt_rateinfo {
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
+ __u32 avg; /* Average secs between packets * scale */
+ __u32 burst; /* Period multiplier for upper limit. */
/* Used internally by the kernel */
- unsigned long prev;
- u_int32_t credit;
- u_int32_t credit_cap, cost;
+ unsigned long prev; /* moved to xt_limit_priv */
+ __u32 credit; /* moved to xt_limit_priv */
+ __u32 credit_cap, cost;
- /* Ugly, ugly fucker. */
- struct xt_rateinfo *master;
+ struct xt_limit_priv *master;
};
#endif /*_XT_RATE_H*/
diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h
index fae74bc3f34..6fa460a3cc2 100644
--- a/include/linux/netfilter/xt_mark.h
+++ b/include/linux/netfilter/xt_mark.h
@@ -1,14 +1,16 @@
#ifndef _XT_MARK_H
#define _XT_MARK_H
+#include <linux/types.h>
+
struct xt_mark_info {
unsigned long mark, mask;
- u_int8_t invert;
+ __u8 invert;
};
struct xt_mark_mtinfo1 {
- u_int32_t mark, mask;
- u_int8_t invert;
+ __u32 mark, mask;
+ __u8 invert;
};
#endif /*_XT_MARK_H*/
diff --git a/include/linux/netfilter/xt_multiport.h b/include/linux/netfilter/xt_multiport.h
index d49ee418371..185db499fcb 100644
--- a/include/linux/netfilter/xt_multiport.h
+++ b/include/linux/netfilter/xt_multiport.h
@@ -1,6 +1,8 @@
#ifndef _XT_MULTIPORT_H
#define _XT_MULTIPORT_H
+#include <linux/types.h>
+
enum xt_multiport_flags
{
XT_MULTIPORT_SOURCE,
@@ -13,18 +15,18 @@ enum xt_multiport_flags
/* Must fit inside union xt_matchinfo: 16 bytes */
struct xt_multiport
{
- u_int8_t flags; /* Type of comparison */
- u_int8_t count; /* Number of ports */
- u_int16_t ports[XT_MULTI_PORTS]; /* Ports */
+ __u8 flags; /* Type of comparison */
+ __u8 count; /* Number of ports */
+ __u16 ports[XT_MULTI_PORTS]; /* Ports */
};
struct xt_multiport_v1
{
- u_int8_t flags; /* Type of comparison */
- u_int8_t count; /* Number of ports */
- u_int16_t ports[XT_MULTI_PORTS]; /* Ports */
- u_int8_t pflags[XT_MULTI_PORTS]; /* Port flags */
- u_int8_t invert; /* Invert flag */
+ __u8 flags; /* Type of comparison */
+ __u8 count; /* Number of ports */
+ __u16 ports[XT_MULTI_PORTS]; /* Ports */
+ __u8 pflags[XT_MULTI_PORTS]; /* Port flags */
+ __u8 invert; /* Invert flag */
};
#endif /*_XT_MULTIPORT_H*/
diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h
new file mode 100644
index 00000000000..fd2272e0959
--- /dev/null
+++ b/include/linux/netfilter/xt_osf.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _XT_OSF_H
+#define _XT_OSF_H
+
+#define MAXGENRELEN 32
+
+#define XT_OSF_GENRE (1<<0)
+#define XT_OSF_TTL (1<<1)
+#define XT_OSF_LOG (1<<2)
+#define XT_OSF_INVERT (1<<3)
+
+#define XT_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */
+#define XT_OSF_LOGLEVEL_FIRST	1	/* log only the first matched fingerprint */
+#define XT_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */
+
+#define XT_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
+#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
+#define XT_OSF_TTL_NOCHECK 2 /* Do not compare ip and fingerprint TTL at all */
+
+struct xt_osf_info {
+ char genre[MAXGENRELEN];
+ __u32 len;
+ __u32 flags;
+ __u32 loglevel;
+ __u32 ttl;
+};
+
+/*
+ * Wildcard MSS (kind of).
+ * It is used to implement a state machine for the different wildcard values
+ * of the MSS and window sizes.
+ */
+struct xt_osf_wc {
+ __u32 wc;
+ __u32 val;
+};
+
+/*
+ * This struct represents IANA options
+ * http://www.iana.org/assignments/tcp-parameters
+ */
+struct xt_osf_opt {
+ __u16 kind, length;
+ struct xt_osf_wc wc;
+};
+
+struct xt_osf_user_finger {
+ struct xt_osf_wc wss;
+
+ __u8 ttl, df;
+ __u16 ss, mss;
+ __u16 opt_num;
+
+ char genre[MAXGENRELEN];
+ char version[MAXGENRELEN];
+ char subtype[MAXGENRELEN];
+
+ /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */
+ struct xt_osf_opt opt[MAX_IPOPTLEN];
+};
+
+struct xt_osf_nlmsg {
+ struct xt_osf_user_finger f;
+ struct iphdr ip;
+ struct tcphdr tcp;
+};
+
+/* Defines for IANA option kinds */
+
+enum iana_options {
+ OSFOPT_EOL = 0, /* End of options */
+ OSFOPT_NOP, /* NOP */
+ OSFOPT_MSS, /* Maximum segment size */
+ OSFOPT_WSO, /* Window scale option */
+ OSFOPT_SACKP, /* SACK permitted */
+ OSFOPT_SACK, /* SACK */
+ OSFOPT_ECHO,
+ OSFOPT_ECHOREPLY,
+ OSFOPT_TS, /* Timestamp option */
+ OSFOPT_POCP, /* Partial Order Connection Permitted */
+ OSFOPT_POSP, /* Partial Order Service Profile */
+
+ /* Others are not used in the current OSF */
+ OSFOPT_EMPTY = 255,
+};
+
+/*
+ * Initial window size option state machine: multiple of mss, mtu or
+ * plain numeric value. Can also be made as plain numeric value which
+ * is not a multiple of specified value.
+ */
+enum xt_osf_window_size_options {
+ OSF_WSS_PLAIN = 0,
+ OSF_WSS_MSS,
+ OSF_WSS_MTU,
+ OSF_WSS_MODULO,
+ OSF_WSS_MAX,
+};
+
+/*
+ * Add/remove fingerprint from the kernel.
+ */
+enum xt_osf_msg_types {
+ OSF_MSG_ADD,
+ OSF_MSG_REMOVE,
+ OSF_MSG_MAX,
+};
+
+enum xt_osf_attr_type {
+ OSF_ATTR_UNSPEC,
+ OSF_ATTR_FINGER,
+ OSF_ATTR_MAX,
+};
+
+#endif /* _XT_OSF_H */
diff --git a/include/linux/netfilter/xt_owner.h b/include/linux/netfilter/xt_owner.h
index c84e52cfe41..2081761714b 100644
--- a/include/linux/netfilter/xt_owner.h
+++ b/include/linux/netfilter/xt_owner.h
@@ -1,6 +1,8 @@
#ifndef _XT_OWNER_MATCH_H
#define _XT_OWNER_MATCH_H
+#include <linux/types.h>
+
enum {
XT_OWNER_UID = 1 << 0,
XT_OWNER_GID = 1 << 1,
@@ -8,9 +10,9 @@ enum {
};
struct xt_owner_match_info {
- u_int32_t uid_min, uid_max;
- u_int32_t gid_min, gid_max;
- u_int8_t match, invert;
+ __u32 uid_min, uid_max;
+ __u32 gid_min, gid_max;
+ __u8 match, invert;
};
#endif /* _XT_OWNER_MATCH_H */
diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h
index 25a7a1815b5..8555e399886 100644
--- a/include/linux/netfilter/xt_physdev.h
+++ b/include/linux/netfilter/xt_physdev.h
@@ -1,6 +1,8 @@
#ifndef _XT_PHYSDEV_H
#define _XT_PHYSDEV_H
+#include <linux/types.h>
+
#ifdef __KERNEL__
#include <linux/if.h>
#endif
@@ -17,8 +19,8 @@ struct xt_physdev_info {
char in_mask[IFNAMSIZ];
char physoutdev[IFNAMSIZ];
char out_mask[IFNAMSIZ];
- u_int8_t invert;
- u_int8_t bitmask;
+ __u8 invert;
+ __u8 bitmask;
};
#endif /*_XT_PHYSDEV_H*/
diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h
index 053d8cc6546..7bb64e7c853 100644
--- a/include/linux/netfilter/xt_policy.h
+++ b/include/linux/netfilter/xt_policy.h
@@ -1,6 +1,8 @@
#ifndef _XT_POLICY_H
#define _XT_POLICY_H
+#include <linux/types.h>
+
#define XT_POLICY_MAX_ELEM 4
enum xt_policy_flags
@@ -19,7 +21,7 @@ enum xt_policy_modes
struct xt_policy_spec
{
- u_int8_t saddr:1,
+ __u8 saddr:1,
daddr:1,
proto:1,
mode:1,
@@ -55,9 +57,9 @@ struct xt_policy_elem
#endif
};
__be32 spi;
- u_int32_t reqid;
- u_int8_t proto;
- u_int8_t mode;
+ __u32 reqid;
+ __u8 proto;
+ __u8 mode;
struct xt_policy_spec match;
struct xt_policy_spec invert;
@@ -66,8 +68,8 @@ struct xt_policy_elem
struct xt_policy_info
{
struct xt_policy_elem pol[XT_POLICY_MAX_ELEM];
- u_int16_t flags;
- u_int16_t len;
+ __u16 flags;
+ __u16 len;
};
#endif /* _XT_POLICY_H */
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index 4c8368d781e..8dc89dfc136 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -6,13 +6,15 @@ enum xt_quota_flags {
};
#define XT_QUOTA_MASK 0x1
+struct xt_quota_priv;
+
struct xt_quota_info {
u_int32_t flags;
u_int32_t pad;
/* Used internally by the kernel */
aligned_u64 quota;
- struct xt_quota_info *master;
+ struct xt_quota_priv *master;
};
#endif /* _XT_QUOTA_H */
diff --git a/include/linux/netfilter/xt_rateest.h b/include/linux/netfilter/xt_rateest.h
index 2010cb74250..d40a6196842 100644
--- a/include/linux/netfilter/xt_rateest.h
+++ b/include/linux/netfilter/xt_rateest.h
@@ -1,6 +1,8 @@
#ifndef _XT_RATEEST_MATCH_H
#define _XT_RATEEST_MATCH_H
+#include <linux/types.h>
+
enum xt_rateest_match_flags {
XT_RATEEST_MATCH_INVERT = 1<<0,
XT_RATEEST_MATCH_ABS = 1<<1,
@@ -20,12 +22,12 @@ enum xt_rateest_match_mode {
struct xt_rateest_match_info {
char name1[IFNAMSIZ];
char name2[IFNAMSIZ];
- u_int16_t flags;
- u_int16_t mode;
- u_int32_t bps1;
- u_int32_t pps1;
- u_int32_t bps2;
- u_int32_t pps2;
+ __u16 flags;
+ __u16 mode;
+ __u32 bps1;
+ __u32 pps1;
+ __u32 bps2;
+ __u32 pps2;
/* Used internally by the kernel */
struct xt_rateest *est1 __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_realm.h b/include/linux/netfilter/xt_realm.h
index 220e8724571..d4a82ee56a0 100644
--- a/include/linux/netfilter/xt_realm.h
+++ b/include/linux/netfilter/xt_realm.h
@@ -1,10 +1,12 @@
#ifndef _XT_REALM_H
#define _XT_REALM_H
+#include <linux/types.h>
+
struct xt_realm_info {
- u_int32_t id;
- u_int32_t mask;
- u_int8_t invert;
+ __u32 id;
+ __u32 mask;
+ __u8 invert;
};
#endif /* _XT_REALM_H */
diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h
index 5cfeb81c679..d2c27660992 100644
--- a/include/linux/netfilter/xt_recent.h
+++ b/include/linux/netfilter/xt_recent.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_NETFILTER_XT_RECENT_H
#define _LINUX_NETFILTER_XT_RECENT_H 1
+#include <linux/types.h>
+
enum {
XT_RECENT_CHECK = 1 << 0,
XT_RECENT_SET = 1 << 1,
@@ -15,12 +17,12 @@ enum {
};
struct xt_recent_mtinfo {
- u_int32_t seconds;
- u_int32_t hit_count;
- u_int8_t check_set;
- u_int8_t invert;
+ __u32 seconds;
+ __u32 hit_count;
+ __u8 check_set;
+ __u8 invert;
char name[XT_RECENT_NAME_LEN];
- u_int8_t side;
+ __u8 side;
};
#endif /* _LINUX_NETFILTER_XT_RECENT_H */
diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h
index 32000ba6ece..29287be696a 100644
--- a/include/linux/netfilter/xt_sctp.h
+++ b/include/linux/netfilter/xt_sctp.h
@@ -1,6 +1,8 @@
#ifndef _XT_SCTP_H_
#define _XT_SCTP_H_
+#include <linux/types.h>
+
#define XT_SCTP_SRC_PORTS 0x01
#define XT_SCTP_DEST_PORTS 0x02
#define XT_SCTP_CHUNK_TYPES 0x04
@@ -8,49 +10,49 @@
#define XT_SCTP_VALID_FLAGS 0x07
struct xt_sctp_flag_info {
- u_int8_t chunktype;
- u_int8_t flag;
- u_int8_t flag_mask;
+ __u8 chunktype;
+ __u8 flag;
+ __u8 flag_mask;
};
#define XT_NUM_SCTP_FLAGS 4
struct xt_sctp_info {
- u_int16_t dpts[2]; /* Min, Max */
- u_int16_t spts[2]; /* Min, Max */
+ __u16 dpts[2]; /* Min, Max */
+ __u16 spts[2]; /* Min, Max */
- u_int32_t chunkmap[256 / sizeof (u_int32_t)]; /* Bit mask of chunks to be matched according to RFC 2960 */
+ __u32 chunkmap[256 / sizeof (__u32)]; /* Bit mask of chunks to be matched according to RFC 2960 */
#define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */
#define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */
#define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */
- u_int32_t chunk_match_type;
+ __u32 chunk_match_type;
struct xt_sctp_flag_info flag_info[XT_NUM_SCTP_FLAGS];
int flag_count;
- u_int32_t flags;
- u_int32_t invflags;
+ __u32 flags;
+ __u32 invflags;
};
#define bytes(type) (sizeof(type) * 8)
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
- (chunkmap)[type / bytes(u_int32_t)] |= \
- 1 << (type % bytes(u_int32_t)); \
+ (chunkmap)[type / bytes(__u32)] |= \
+ 1 << (type % bytes(__u32)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
- (chunkmap)[type / bytes(u_int32_t)] &= \
- ~(1 << (type % bytes(u_int32_t))); \
+ (chunkmap)[type / bytes(__u32)] &= \
+ ~(1 << (type % bytes(__u32))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
- ((chunkmap)[type / bytes (u_int32_t)] & \
- (1 << (type % bytes (u_int32_t)))) ? 1: 0; \
+ ((chunkmap)[type / bytes (__u32)] & \
+ (1 << (type % bytes (__u32)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \
@@ -65,7 +67,7 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
__sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
-__sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n)
+__sctp_chunkmap_is_clear(const __u32 *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
@@ -77,7 +79,7 @@ __sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n)
#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
__sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap))
static inline bool
-__sctp_chunkmap_is_all_set(const u_int32_t *chunkmap, unsigned int n)
+__sctp_chunkmap_is_all_set(const __u32 *chunkmap, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
new file mode 100644
index 00000000000..6f475b8ff34
--- /dev/null
+++ b/include/linux/netfilter/xt_socket.h
@@ -0,0 +1,12 @@
+#ifndef _XT_SOCKET_H
+#define _XT_SOCKET_H
+
+enum {
+ XT_SOCKET_TRANSPARENT = 1 << 0,
+};
+
+struct xt_socket_mtinfo1 {
+ __u8 flags;
+};
+
+#endif /* _XT_SOCKET_H */
diff --git a/include/linux/netfilter/xt_statistic.h b/include/linux/netfilter/xt_statistic.h
index 3d38bc97504..4e983ef0c96 100644
--- a/include/linux/netfilter/xt_statistic.h
+++ b/include/linux/netfilter/xt_statistic.h
@@ -1,6 +1,8 @@
#ifndef _XT_STATISTIC_H
#define _XT_STATISTIC_H
+#include <linux/types.h>
+
enum xt_statistic_mode {
XT_STATISTIC_MODE_RANDOM,
XT_STATISTIC_MODE_NTH,
@@ -13,21 +15,22 @@ enum xt_statistic_flags {
};
#define XT_STATISTIC_MASK 0x1
+struct xt_statistic_priv;
+
struct xt_statistic_info {
- u_int16_t mode;
- u_int16_t flags;
+ __u16 mode;
+ __u16 flags;
union {
struct {
- u_int32_t probability;
+ __u32 probability;
} random;
struct {
- u_int32_t every;
- u_int32_t packet;
- /* Used internally by the kernel */
- u_int32_t count;
+ __u32 every;
+ __u32 packet;
+ __u32 count; /* unused */
} nth;
} u;
- struct xt_statistic_info *master __attribute__((aligned(8)));
+ struct xt_statistic_priv *master __attribute__((aligned(8)));
};
#endif /* _XT_STATISTIC_H */
diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h
index 8a6ba7bbef9..ecbb95fc89e 100644
--- a/include/linux/netfilter/xt_string.h
+++ b/include/linux/netfilter/xt_string.h
@@ -1,6 +1,8 @@
#ifndef _XT_STRING_H
#define _XT_STRING_H
+#include <linux/types.h>
+
#define XT_STRING_MAX_PATTERN_SIZE 128
#define XT_STRING_MAX_ALGO_NAME_SIZE 16
@@ -11,18 +13,18 @@ enum {
struct xt_string_info
{
- u_int16_t from_offset;
- u_int16_t to_offset;
+ __u16 from_offset;
+ __u16 to_offset;
char algo[XT_STRING_MAX_ALGO_NAME_SIZE];
char pattern[XT_STRING_MAX_PATTERN_SIZE];
- u_int8_t patlen;
+ __u8 patlen;
union {
struct {
- u_int8_t invert;
+ __u8 invert;
} v0;
struct {
- u_int8_t flags;
+ __u8 flags;
} v1;
} u;
diff --git a/include/linux/netfilter/xt_tcpmss.h b/include/linux/netfilter/xt_tcpmss.h
index e03274c4c79..fbac56b9e66 100644
--- a/include/linux/netfilter/xt_tcpmss.h
+++ b/include/linux/netfilter/xt_tcpmss.h
@@ -1,9 +1,11 @@
#ifndef _XT_TCPMSS_MATCH_H
#define _XT_TCPMSS_MATCH_H
+#include <linux/types.h>
+
struct xt_tcpmss_match_info {
- u_int16_t mss_min, mss_max;
- u_int8_t invert;
+ __u16 mss_min, mss_max;
+ __u8 invert;
};
#endif /*_XT_TCPMSS_MATCH_H*/
diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h
index 78bc65f11ad..a490a0bc1d2 100644
--- a/include/linux/netfilter/xt_tcpudp.h
+++ b/include/linux/netfilter/xt_tcpudp.h
@@ -1,15 +1,17 @@
#ifndef _XT_TCPUDP_H
#define _XT_TCPUDP_H
+#include <linux/types.h>
+
/* TCP matching stuff */
struct xt_tcp
{
- u_int16_t spts[2]; /* Source port range. */
- u_int16_t dpts[2]; /* Destination port range. */
- u_int8_t option; /* TCP Option iff non-zero*/
- u_int8_t flg_mask; /* TCP flags mask byte */
- u_int8_t flg_cmp; /* TCP flags compare byte */
- u_int8_t invflags; /* Inverse flags */
+ __u16 spts[2]; /* Source port range. */
+ __u16 dpts[2]; /* Destination port range. */
+ __u8 option; /* TCP Option iff non-zero */
+ __u8 flg_mask; /* TCP flags mask byte */
+ __u8 flg_cmp; /* TCP flags compare byte */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "inv" field in struct ipt_tcp. */
@@ -22,9 +24,9 @@ struct xt_tcp
/* UDP matching stuff */
struct xt_udp
{
- u_int16_t spts[2]; /* Source port range. */
- u_int16_t dpts[2]; /* Destination port range. */
- u_int8_t invflags; /* Inverse flags */
+ __u16 spts[2]; /* Source port range. */
+ __u16 dpts[2]; /* Destination port range. */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "invflags" field in struct ipt_udp. */
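For orientation, a hedged sketch of how a match might test host-order ports against the ranges in struct xt_tcp; the function name and the sport/dport parameters are illustrative, and XT_TCP_INV_SRCPT/XT_TCP_INV_DSTPT are the existing inversion bits for the invflags field:

/* Sketch: range test with optional inversion, mirroring how invflags is used. */
static bool tcp_ports_match(const struct xt_tcp *info, __u16 sport, __u16 dport)
{
        bool src_ok = sport >= info->spts[0] && sport <= info->spts[1];
        bool dst_ok = dport >= info->dpts[0] && dport <= info->dpts[1];

        if (info->invflags & XT_TCP_INV_SRCPT)
                src_ok = !src_ok;
        if (info->invflags & XT_TCP_INV_DSTPT)
                dst_ok = !dst_ok;
        return src_ok && dst_ok;
}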
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 499aa937590..f8105e54716 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -59,9 +59,9 @@ static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
{
switch (skb->protocol) {
- case __constant_htons(ETH_P_8021Q):
+ case __cpu_to_be16(ETH_P_8021Q):
return VLAN_HLEN;
- case __constant_htons(ETH_P_PPP_SES):
+ case __cpu_to_be16(ETH_P_PPP_SES):
return PPPOE_SES_HLEN;
default:
return 0;
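The switch from __constant_htons() to __cpu_to_be16() keeps the case labels compile-time constants while comparing directly against the big-endian skb->protocol. A small standalone illustration (the helper name is hypothetical):

/* Sketch: big-endian protocol constants used as case labels. */
static int is_vlan_or_pppoe(__be16 proto)
{
        switch (proto) {
        case __cpu_to_be16(ETH_P_8021Q):        /* 802.1Q VLAN tag */
        case __cpu_to_be16(ETH_P_PPP_SES):      /* PPPoE session */
                return 1;
        default:
                return 0;
        }
}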
diff --git a/include/linux/netfilter_ipv4/ipt_owner.h b/include/linux/netfilter_ipv4/ipt_owner.h
index 92f4bdac54e..a78445be999 100644
--- a/include/linux/netfilter_ipv4/ipt_owner.h
+++ b/include/linux/netfilter_ipv4/ipt_owner.h
@@ -9,10 +9,10 @@
#define IPT_OWNER_COMM 0x10
struct ipt_owner_info {
- uid_t uid;
- gid_t gid;
- pid_t pid;
- pid_t sid;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_pid_t pid;
+ __kernel_pid_t sid;
char comm[16];
u_int8_t match, invert; /* flags */
};
diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild
index 8887a5fcd1d..aca4bd1f6d7 100644
--- a/include/linux/netfilter_ipv6/Kbuild
+++ b/include/linux/netfilter_ipv6/Kbuild
@@ -11,6 +11,7 @@ header-y += ip6t_length.h
header-y += ip6t_limit.h
header-y += ip6t_mac.h
header-y += ip6t_mark.h
+header-y += ip6t_mh.h
header-y += ip6t_multiport.h
header-y += ip6t_opts.h
header-y += ip6t_owner.h
diff --git a/include/linux/netfilter_ipv6/ip6t_owner.h b/include/linux/netfilter_ipv6/ip6t_owner.h
index 19937da3d10..ec5cc7a38c4 100644
--- a/include/linux/netfilter_ipv6/ip6t_owner.h
+++ b/include/linux/netfilter_ipv6/ip6t_owner.h
@@ -8,10 +8,10 @@
#define IP6T_OWNER_SID 0x08
struct ip6t_owner_info {
- uid_t uid;
- gid_t gid;
- pid_t pid;
- pid_t sid;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_pid_t pid;
+ __kernel_pid_t sid;
u_int8_t match, invert; /* flags */
};
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 51b09a1f46c..5ba398e9030 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -103,6 +103,8 @@ struct nlmsgerr
#define NETLINK_ADD_MEMBERSHIP 1
#define NETLINK_DROP_MEMBERSHIP 2
#define NETLINK_PKTINFO 3
+#define NETLINK_BROADCAST_ERROR 4
+#define NETLINK_NO_ENOBUFS 5
struct nl_pktinfo
{
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e38d3c9dccd..2524267210d 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -18,7 +18,7 @@ struct netpoll {
const char *name;
void (*rx_hook)(struct netpoll *, int, char *, int);
- u32 local_ip, remote_ip;
+ __be32 local_ip, remote_ip;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
};
@@ -63,6 +63,13 @@ static inline int netpoll_rx(struct sk_buff *skb)
return ret;
}
+static inline int netpoll_rx_on(struct sk_buff *skb)
+{
+ struct netpoll_info *npinfo = skb->dev->npinfo;
+
+ return npinfo && (npinfo->rx_np || npinfo->rx_flags);
+}
+
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
if (!list_empty(&skb->dev->napi_list))
@@ -99,6 +106,10 @@ static inline int netpoll_rx(struct sk_buff *skb)
{
return 0;
}
+static inline int netpoll_rx_on(struct sk_buff *skb)
+{
+ return 0;
+}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
return 0;
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 54af92c1c70..f387919bbc5 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -25,8 +25,9 @@
#define NFSMODE_SOCK 0140000
#define NFSMODE_FIFO 0010000
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_PORT 627
+#define NFS_MNT_PROGRAM 100005
+#define NFS_MNT_VERSION 1
+#define NFS_MNT3_VERSION 3
/*
* NFS stats. The good thing with these values is that NFSv3 errors are
@@ -109,7 +110,6 @@
NFSERR_FILE_OPEN = 10046, /* v4 */
NFSERR_ADMIN_REVOKED = 10047, /* v4 */
NFSERR_CB_PATH_DOWN = 10048, /* v4 */
- NFSERR_REPLAY_ME = 10049 /* v4 */
};
/* NFSv2 file types - beware, these are not the same in NFSv3 */
diff --git a/include/linux/nfs2.h b/include/linux/nfs2.h
index 0ed9517138f..fde24b30cc9 100644
--- a/include/linux/nfs2.h
+++ b/include/linux/nfs2.h
@@ -64,11 +64,4 @@ struct nfs2_fh {
#define NFSPROC_READDIR 16
#define NFSPROC_STATFS 17
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_VERSION 1
-#define MNTPROC_NULL 0
-#define MNTPROC_MNT 1
-#define MNTPROC_UMNT 3
-#define MNTPROC_UMNTALL 4
-
#endif /* _LINUX_NFS2_H */
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index 539f3b550ea..ac33806ec7f 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -88,12 +88,7 @@ struct nfs3_fh {
#define NFS3PROC_PATHCONF 20
#define NFS3PROC_COMMIT 21
-#define NFS_MNT3_PROGRAM 100005
#define NFS_MNT3_VERSION 3
-#define MOUNTPROC3_NULL 0
-#define MOUNTPROC3_MNT 1
-#define MOUNTPROC3_UMNT 3
-#define MOUNTPROC3_UMNTALL 4
#if defined(__KERNEL__)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b912311a56b..bd2eba53066 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,8 @@
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
+#define NFS4_OPAQUE_LIMIT 1024
+#define NFS4_MAX_SESSIONID_LEN 16
#define NFS4_ACCESS_READ 0x0001
#define NFS4_ACCESS_LOOKUP 0x0002
@@ -38,6 +40,7 @@
#define NFS4_OPEN_RESULT_CONFIRM 0x0002
#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_SHARE_ACCESS_MASK 0x000F
#define NFS4_SHARE_ACCESS_READ 0x0001
#define NFS4_SHARE_ACCESS_WRITE 0x0002
#define NFS4_SHARE_ACCESS_BOTH 0x0003
@@ -45,6 +48,19 @@
#define NFS4_SHARE_DENY_WRITE 0x0002
#define NFS4_SHARE_DENY_BOTH 0x0003
+/* nfs41 */
+#define NFS4_SHARE_WANT_MASK 0xFF00
+#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000
+#define NFS4_SHARE_WANT_READ_DELEG 0x0100
+#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200
+#define NFS4_SHARE_WANT_ANY_DELEG 0x0300
+#define NFS4_SHARE_WANT_NO_DELEG 0x0400
+#define NFS4_SHARE_WANT_CANCEL 0x0500
+
+#define NFS4_SHARE_WHEN_MASK 0xF0000
+#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000
+#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000
+
#define NFS4_SET_TO_SERVER_TIME 0
#define NFS4_SET_TO_CLIENT_TIME 1
@@ -88,8 +104,43 @@
#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0
#define NFS4_ACE_MASK_ALL 0x001F01FF
+#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001
+#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002
+#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000
+#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000
+#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000
+#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
+#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
+/*
+ * Since the validity of these bits depends on whether
+ * they're set in the argument or response, have separate
+ * invalid flag masks for arg (_A) and resp (_R).
+ */
+#define EXCHGID4_FLAG_MASK_A 0x40070003
+#define EXCHGID4_FLAG_MASK_R 0x80070003
+
+#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
+#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
+#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004
+#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008
+#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010
+#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020
+#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
+#define SEQ4_STATUS_LEASE_MOVED 0x00000080
+#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
+
#define NFS4_MAX_UINT64 (~(u64)0)
+/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
+ * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly.
+ */
+#define NFS4_MAX_OPS 8
+
+/* Our NFS4 client back channel server only wants the cb_sequence and the
+ * actual operation per compound
+ */
+#define NFS4_MAX_BACK_CHANNEL_OPS 2
+
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
NFS4_ACL_WHO_OWNER,
@@ -154,6 +205,28 @@ enum nfs_opnum4 {
OP_VERIFY = 37,
OP_WRITE = 38,
OP_RELEASE_LOCKOWNER = 39,
+
+ /* nfs41 */
+ OP_BACKCHANNEL_CTL = 40,
+ OP_BIND_CONN_TO_SESSION = 41,
+ OP_EXCHANGE_ID = 42,
+ OP_CREATE_SESSION = 43,
+ OP_DESTROY_SESSION = 44,
+ OP_FREE_STATEID = 45,
+ OP_GET_DIR_DELEGATION = 46,
+ OP_GETDEVICEINFO = 47,
+ OP_GETDEVICELIST = 48,
+ OP_LAYOUTCOMMIT = 49,
+ OP_LAYOUTGET = 50,
+ OP_LAYOUTRETURN = 51,
+ OP_SECINFO_NO_NAME = 52,
+ OP_SEQUENCE = 53,
+ OP_SET_SSV = 54,
+ OP_TEST_STATEID = 55,
+ OP_WANT_DELEGATION = 56,
+ OP_DESTROY_CLIENTID = 57,
+ OP_RECLAIM_COMPLETE = 58,
+
OP_ILLEGAL = 10044,
};
@@ -230,7 +303,48 @@ enum nfsstat4 {
NFS4ERR_DEADLOCK = 10045,
NFS4ERR_FILE_OPEN = 10046,
NFS4ERR_ADMIN_REVOKED = 10047,
- NFS4ERR_CB_PATH_DOWN = 10048
+ NFS4ERR_CB_PATH_DOWN = 10048,
+
+ /* nfs41 */
+ NFS4ERR_BADIOMODE = 10049,
+ NFS4ERR_BADLAYOUT = 10050,
+ NFS4ERR_BAD_SESSION_DIGEST = 10051,
+ NFS4ERR_BADSESSION = 10052,
+ NFS4ERR_BADSLOT = 10053,
+ NFS4ERR_COMPLETE_ALREADY = 10054,
+ NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055,
+ NFS4ERR_DELEG_ALREADY_WANTED = 10056,
+ NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */
+ NFS4ERR_LAYOUTTRYLATER = 10058,
+ NFS4ERR_LAYOUTUNAVAILABLE = 10059,
+ NFS4ERR_NOMATCHING_LAYOUT = 10060,
+ NFS4ERR_RECALLCONFLICT = 10061,
+ NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062,
+ NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */
+ NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */
+ NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */
+ NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */
+ NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */
+ NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */
+ NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */
+ NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */
+ NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */
+ NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */
+ /* Error 10073 is unused. */
+ NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */
+ NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */
+ NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */
+ NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */
+ NFS4ERR_DEADSESSION = 10078, /* persistent session dead */
+ NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */
+ NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */
+ NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */
+ NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */
+ NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */
+ NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */
+ NFS4ERR_REJECT_DELEG = 10085, /* on callback */
+ NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */
+ NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
};
/*
@@ -265,7 +379,13 @@ enum opentype4 {
enum createmode4 {
NFS4_CREATE_UNCHECKED = 0,
NFS4_CREATE_GUARDED = 1,
- NFS4_CREATE_EXCLUSIVE = 2
+ NFS4_CREATE_EXCLUSIVE = 2,
+ /*
+ * New to NFSv4.1. If session is persistent,
+ * GUARDED4 MUST be used. Otherwise, use
+ * EXCLUSIVE4_1 instead of EXCLUSIVE4.
+ */
+ NFS4_CREATE_EXCLUSIVE4_1 = 3
};
enum limit_by4 {
@@ -301,6 +421,8 @@ enum lock_type4 {
#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+/* Mandatory in NFSv4.1 */
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
/* Recommended Attributes */
#define FATTR4_WORD0_ACL (1UL << 12)
@@ -351,6 +473,13 @@ enum lock_type4 {
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
#define NFS4_MINOR_VERSION 0
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MINOR_VERSION 1
+#else
+#define NFS4_MAX_MINOR_VERSION 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
@@ -391,6 +520,29 @@ enum {
NFSPROC4_CLNT_GETACL,
NFSPROC4_CLNT_SETACL,
NFSPROC4_CLNT_FS_LOCATIONS,
+
+ /* nfs41 */
+ NFSPROC4_CLNT_EXCHANGE_ID,
+ NFSPROC4_CLNT_CREATE_SESSION,
+ NFSPROC4_CLNT_DESTROY_SESSION,
+ NFSPROC4_CLNT_SEQUENCE,
+ NFSPROC4_CLNT_GET_LEASE_TIME,
+};
+
+/* nfs41 types */
+struct nfs4_sessionid {
+ unsigned char data[NFS4_MAX_SESSIONID_LEN];
+};
+
+/* Create Session Flags */
+#define SESSION4_PERSIST 0x001
+#define SESSION4_BACK_CHAN 0x002
+#define SESSION4_RDMA 0x004
+
+enum state_protect_how4 {
+ SP4_NONE = 0,
+ SP4_MACH_CRED = 1,
+ SP4_SSV = 2
};
#endif
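As the comment on the EXCHGID4 masks above notes, some flags are valid only in the argument and others only in the response. A hedged sketch of argument-side validation; the function name and the error mapping are illustrative:

/* Sketch: reject EXCHANGE_ID arguments carrying response-only or unknown bits. */
static int exchange_id_flags_ok(u32 flags)
{
        if (flags & ~EXCHGID4_FLAG_MASK_A)      /* e.g. EXCHGID4_FLAG_CONFIRMED_R */
                return 0;                       /* caller would return an error */
        return 1;
}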
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index db867b04ac3..fdffb413b19 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -166,8 +166,7 @@ struct nfs_inode {
*/
struct radix_tree_root nfs_page_tree;
- unsigned long ncommit,
- npages;
+ unsigned long npages;
/* Open contexts for shared mmap writes */
struct list_head open_files;
@@ -186,6 +185,9 @@ struct nfs_inode {
fmode_t delegation_state;
struct rw_semaphore rwsem;
#endif /* CONFIG_NFS_V4*/
+#ifdef CONFIG_NFS_FSCACHE
+ struct fscache_cookie *fscache;
+#endif
struct inode vfs_inode;
};
@@ -207,6 +209,9 @@ struct nfs_inode {
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
+#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
+#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
+#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
+static inline int NFS_FSCACHE(const struct inode *inode)
+{
+ return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+}
+
static inline __u64 NFS_FILEID(const struct inode *inode)
{
return NFS_I(inode)->fileid;
@@ -415,7 +425,7 @@ extern const struct inode_operations nfs_dir_inode_operations;
extern const struct inode_operations nfs3_dir_inode_operations;
#endif /* CONFIG_NFS_V3 */
extern const struct file_operations nfs_dir_operations;
-extern struct dentry_operations nfs_dentry_operations;
+extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
extern void nfs_readdata_release(void *data);
+extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
+ struct page *);
/*
* Allocate nfs_read_data structures
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void);
#define NFSDBG_CALLBACK 0x0100
#define NFSDBG_CLIENT 0x0200
#define NFSDBG_MOUNT 0x0400
+#define NFSDBG_FSCACHE 0x0800
#define NFSDBG_ALL 0xFFFF
#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 9bb81aec91c..19fe15d1204 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -4,11 +4,17 @@
#include <linux/list.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
+#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/xprt.h>
#include <asm/atomic.h>
+struct nfs4_session;
struct nfs_iostats;
struct nlm_host;
+struct nfs4_sequence_args;
+struct nfs4_sequence_res;
+struct nfs_server;
/*
* The nfs_client identifies our client state to the server.
@@ -18,6 +24,7 @@ struct nfs_client {
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
+#define NFS_CS_SESSION_INITING 2 /* busy initialising session */
unsigned long cl_res_state; /* NFS resources state */
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
@@ -32,6 +39,7 @@ struct nfs_client {
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
+ u32 cl_minorversion;/* NFSv4 minorversion */
struct rpc_cred *cl_machine_cred;
#ifdef CONFIG_NFS_V4
@@ -63,6 +71,25 @@ struct nfs_client {
*/
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
+ int (* cl_call_sync)(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+#endif /* CONFIG_NFS_V4 */
+
+#ifdef CONFIG_NFS_V4_1
+ /* clientid returned from EXCHANGE_ID, used by session operations */
+ u64 cl_ex_clid;
+ /* The sequence id to use for the next CREATE_SESSION */
+ u32 cl_seqid;
+ /* The flags used for obtaining the clientid during EXCHANGE_ID */
+ u32 cl_exchange_flags;
+ struct nfs4_session *cl_session; /* shared session */
+#endif /* CONFIG_NFS_V4_1 */
+
+#ifdef CONFIG_NFS_FSCACHE
+ struct fscache_cookie *fscache; /* client index cache cookie */
#endif
};
@@ -96,16 +123,28 @@ struct nfs_server {
unsigned int acdirmin;
unsigned int acdirmax;
unsigned int namelen;
+ unsigned int options; /* extra options enabled by mount */
+#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
struct nfs_fsid fsid;
__u64 maxfilesize; /* maximum file size */
unsigned long mount_time; /* when this fs was mounted */
dev_t s_dev; /* superblock dev numbers */
+#ifdef CONFIG_NFS_FSCACHE
+ struct nfs_fscache_key *fscache_key; /* unique key for superblock */
+ struct fscache_cookie *fscache; /* superblock cookie */
+#endif
+
#ifdef CONFIG_NFS_V4
u32 attr_bitmask[2];/* V4 bitmask representing the set
of attributes supported on this
filesystem */
+ u32 cache_consistency_bitmask[2];
+ /* V4 bitmask representing the subset
+ of change attribute, size, ctime
+ and mtime attributes supported by
+ the server */
u32 acl_bitmask; /* V4 bitmask representing the ACEs
that are supported on this
filesystem */
@@ -129,4 +168,46 @@ struct nfs_server {
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+
+/* maximum number of slots to use */
+#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
+
+#if defined(CONFIG_NFS_V4_1)
+
+/* Sessions */
+#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
+struct nfs4_slot_table {
+ struct nfs4_slot *slots; /* seqid per slot */
+ unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
+ spinlock_t slot_tbl_lock;
+ struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
+ int max_slots; /* # slots in table */
+ int highest_used_slotid; /* sent to server on each SEQ.
+ * op for dynamic resizing */
+};
+
+static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
+{
+ return sp - tbl->slots;
+}
+
+/*
+ * Session related parameters
+ */
+struct nfs4_session {
+ struct nfs4_sessionid sess_id;
+ u32 flags;
+ unsigned long session_state;
+ u32 hash_alg;
+ u32 ssv_len;
+
+ /* The fore and back channel */
+ struct nfs4_channel_attrs fc_attrs;
+ struct nfs4_slot_table fc_slot_table;
+ struct nfs4_channel_attrs bc_attrs;
+ struct nfs4_slot_table bc_slot_table;
+ struct nfs_client *clp;
+};
+
+#endif /* CONFIG_NFS_V4_1 */
#endif
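A hedged sketch of how a free slot might be claimed from the used_slots bitmap above, using the generic bitmap helpers under slot_tbl_lock; waiting on slot_tbl_waitq when the table is full is left out, and the function name is illustrative:

/* Sketch: claim the lowest free slot index, or return -1 if none is free. */
static int nfs4_claim_slot(struct nfs4_slot_table *tbl)
{
        int slotid;

        spin_lock(&tbl->slot_tbl_lock);
        slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
        if (slotid >= tbl->max_slots) {
                spin_unlock(&tbl->slot_tbl_lock);
                return -1;                      /* full: caller would wait on slot_tbl_waitq */
        }
        __set_bit(slotid, tbl->used_slots);
        if (slotid > tbl->highest_used_slotid)
                tbl->highest_used_slotid = slotid;
        spin_unlock(&tbl->slot_tbl_lock);
        return slotid;
}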
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 1cb9a3fed2b..68b10f5f890 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters {
__NFSIOS_COUNTSMAX,
};
+/*
+ * NFS local caching servicing counters
+ */
+enum nfs_stat_fscachecounters {
+ NFSIOS_FSCACHE_PAGES_READ_OK,
+ NFSIOS_FSCACHE_PAGES_READ_FAIL,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
+ NFSIOS_FSCACHE_PAGES_UNCACHED,
+ __NFSIOS_FSCACHEMAX,
+};
+
#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2e5f00066af..62f63fb0c4c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -27,12 +27,8 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
}
struct nfs_fattr {
- unsigned short valid; /* which fields are valid */
- __u64 pre_size; /* pre_op_attr.size */
- struct timespec pre_mtime; /* pre_op_attr.mtime */
- struct timespec pre_ctime; /* pre_op_attr.ctime */
- enum nfs_ftype type; /* always use NFSv2 types */
- __u32 mode;
+ unsigned int valid; /* which fields are valid */
+ umode_t mode;
__u32 nlink;
__u32 uid;
__u32 gid;
@@ -52,19 +48,55 @@ struct nfs_fattr {
struct timespec atime;
struct timespec mtime;
struct timespec ctime;
- __u32 bitmap[2]; /* NFSv4 returned attribute bitmap */
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
+ __u64 pre_size; /* pre_op_attr.size */
+ struct timespec pre_mtime; /* pre_op_attr.mtime */
+ struct timespec pre_ctime; /* pre_op_attr.ctime */
unsigned long time_start;
unsigned long gencount;
};
-#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */
-#define NFS_ATTR_FATTR 0x0002 /* post-op attributes */
-#define NFS_ATTR_FATTR_V3 0x0004 /* NFSv3 attributes */
-#define NFS_ATTR_FATTR_V4 0x0008 /* NFSv4 change attribute */
-#define NFS_ATTR_WCC_V4 0x0010 /* pre-op change attribute */
-#define NFS_ATTR_FATTR_V4_REFERRAL 0x0020 /* NFSv4 referral */
+#define NFS_ATTR_FATTR_TYPE (1U << 0)
+#define NFS_ATTR_FATTR_MODE (1U << 1)
+#define NFS_ATTR_FATTR_NLINK (1U << 2)
+#define NFS_ATTR_FATTR_OWNER (1U << 3)
+#define NFS_ATTR_FATTR_GROUP (1U << 4)
+#define NFS_ATTR_FATTR_RDEV (1U << 5)
+#define NFS_ATTR_FATTR_SIZE (1U << 6)
+#define NFS_ATTR_FATTR_PRESIZE (1U << 7)
+#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8)
+#define NFS_ATTR_FATTR_SPACE_USED (1U << 9)
+#define NFS_ATTR_FATTR_FSID (1U << 10)
+#define NFS_ATTR_FATTR_FILEID (1U << 11)
+#define NFS_ATTR_FATTR_ATIME (1U << 12)
+#define NFS_ATTR_FATTR_MTIME (1U << 13)
+#define NFS_ATTR_FATTR_CTIME (1U << 14)
+#define NFS_ATTR_FATTR_PREMTIME (1U << 15)
+#define NFS_ATTR_FATTR_PRECTIME (1U << 16)
+#define NFS_ATTR_FATTR_CHANGE (1U << 17)
+#define NFS_ATTR_FATTR_PRECHANGE (1U << 18)
+#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */
+
+#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
+ | NFS_ATTR_FATTR_MODE \
+ | NFS_ATTR_FATTR_NLINK \
+ | NFS_ATTR_FATTR_OWNER \
+ | NFS_ATTR_FATTR_GROUP \
+ | NFS_ATTR_FATTR_RDEV \
+ | NFS_ATTR_FATTR_SIZE \
+ | NFS_ATTR_FATTR_FSID \
+ | NFS_ATTR_FATTR_FILEID \
+ | NFS_ATTR_FATTR_ATIME \
+ | NFS_ATTR_FATTR_MTIME \
+ | NFS_ATTR_FATTR_CTIME)
+#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
+ | NFS_ATTR_FATTR_BLOCKS_USED)
+#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
+ | NFS_ATTR_FATTR_SPACE_USED)
+#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
+ | NFS_ATTR_FATTR_SPACE_USED \
+ | NFS_ATTR_FATTR_CHANGE)
/*
* Info on the file system
@@ -113,6 +145,44 @@ struct nfs4_change_info {
};
struct nfs_seqid;
+
+/* nfs41 sessions channel attributes */
+struct nfs4_channel_attrs {
+ u32 headerpadsz;
+ u32 max_rqst_sz;
+ u32 max_resp_sz;
+ u32 max_resp_sz_cached;
+ u32 max_ops;
+ u32 max_reqs;
+};
+
+/* nfs41 sessions slot seqid */
+struct nfs4_slot {
+ u32 seq_nr;
+};
+
+struct nfs4_sequence_args {
+ struct nfs4_session *sa_session;
+ u8 sa_slotid;
+ u8 sa_cache_this;
+};
+
+struct nfs4_sequence_res {
+ struct nfs4_session *sr_session;
+ u8 sr_slotid; /* slot used to send request */
+ unsigned long sr_renewal_time;
+ int sr_status; /* sequence operation status */
+};
+
+struct nfs4_get_lease_time_args {
+ struct nfs4_sequence_args la_seq_args;
+};
+
+struct nfs4_get_lease_time_res {
+ struct nfs_fsinfo *lr_fsinfo;
+ struct nfs4_sequence_res lr_seq_res;
+};
+
/*
* Arguments to the open call.
*/
@@ -133,6 +203,7 @@ struct nfs_openargs {
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
__u32 claim;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_openres {
@@ -149,6 +220,7 @@ struct nfs_openres {
__u32 do_recall;
__u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -174,6 +246,7 @@ struct nfs_closeargs {
struct nfs_seqid * seqid;
fmode_t fmode;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_closeres {
@@ -181,6 +254,7 @@ struct nfs_closeres {
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
* Arguments to the lock, lockt, and locku calls.
@@ -201,12 +275,14 @@ struct nfs_lock_args {
unsigned char block : 1;
unsigned char reclaim : 1;
unsigned char new_lock_owner : 1;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lock_res {
nfs4_stateid stateid;
struct nfs_seqid * lock_seqid;
struct nfs_seqid * open_seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_locku_args {
@@ -214,32 +290,38 @@ struct nfs_locku_args {
struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_locku_res {
nfs4_stateid stateid;
struct nfs_seqid * seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_lockt_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_lowner lock_owner;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lockt_res {
struct file_lock * denied; /* LOCK, LOCKT failed */
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_delegreturnres {
struct nfs_fattr * fattr;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -252,12 +334,14 @@ struct nfs_readargs {
__u32 count;
unsigned int pgbase;
struct page ** pages;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_readres {
struct nfs_fattr * fattr;
__u32 count;
int eof;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -272,6 +356,7 @@ struct nfs_writeargs {
unsigned int pgbase;
struct page ** pages;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_writeverf {
@@ -284,6 +369,7 @@ struct nfs_writeres {
struct nfs_writeverf * verf;
__u32 count;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -293,12 +379,14 @@ struct nfs_removeargs {
const struct nfs_fh *fh;
struct qstr name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_removeres {
const struct nfs_server *server;
struct nfs4_change_info cinfo;
struct nfs_fattr dir_attr;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -351,6 +439,7 @@ struct nfs_setattrargs {
struct iattr * iap;
const struct nfs_server * server; /* Needed for name mapping */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_setaclargs {
@@ -358,6 +447,11 @@ struct nfs_setaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_setaclres {
+ struct nfs4_sequence_res seq_res;
};
struct nfs_getaclargs {
@@ -365,11 +459,18 @@ struct nfs_getaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_getaclres {
+ size_t acl_len;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_setattrres {
struct nfs_fattr * fattr;
const struct nfs_server * server;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_linkargs {
@@ -551,6 +652,7 @@ struct nfs4_accessargs {
const struct nfs_fh * fh;
const u32 * bitmask;
u32 access;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_accessres {
@@ -558,6 +660,7 @@ struct nfs4_accessres {
struct nfs_fattr * fattr;
u32 supported;
u32 access;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_create_arg {
@@ -577,6 +680,7 @@ struct nfs4_create_arg {
const struct iattr * attrs;
const struct nfs_fh * dir_fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_create_res {
@@ -585,21 +689,30 @@ struct nfs4_create_res {
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
struct nfs_fattr * dir_fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_fsinfo_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fsinfo_res {
+ struct nfs_fsinfo *fsinfo;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_getattr_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_getattr_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_link_arg {
@@ -607,6 +720,7 @@ struct nfs4_link_arg {
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_link_res {
@@ -614,6 +728,7 @@ struct nfs4_link_res {
struct nfs_fattr * fattr;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
+ struct nfs4_sequence_res seq_res;
};
@@ -621,21 +736,30 @@ struct nfs4_lookup_arg {
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_lookup_root_arg {
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_pathconf_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_pathconf_res {
+ struct nfs_pathconf *pathconf;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readdir_arg {
@@ -646,11 +770,13 @@ struct nfs4_readdir_arg {
struct page ** pages; /* zero-copy data */
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_readdir_res {
nfs4_verifier verifier;
unsigned int pgbase;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readlink {
@@ -658,6 +784,11 @@ struct nfs4_readlink {
unsigned int pgbase;
unsigned int pglen; /* zero-copy data */
struct page ** pages; /* zero-copy data */
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_readlink_res {
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_rename_arg {
@@ -666,6 +797,7 @@ struct nfs4_rename_arg {
const struct qstr * old_name;
const struct qstr * new_name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_rename_res {
@@ -674,6 +806,7 @@ struct nfs4_rename_res {
struct nfs_fattr * old_fattr;
struct nfs4_change_info new_cinfo;
struct nfs_fattr * new_fattr;
+ struct nfs4_sequence_res seq_res;
};
#define NFS4_SETCLIENTID_NAMELEN (127)
@@ -692,6 +825,17 @@ struct nfs4_setclientid {
struct nfs4_statfs_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_statfs_res {
+ struct nfs_fsstat *fsstat;
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs4_server_caps_arg {
+ struct nfs_fh *fhandle;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_server_caps_res {
@@ -699,6 +843,7 @@ struct nfs4_server_caps_res {
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_string {
@@ -733,10 +878,68 @@ struct nfs4_fs_locations_arg {
const struct qstr *name;
struct page *page;
const u32 *bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fs_locations_res {
+ struct nfs4_fs_locations *fs_locations;
+ struct nfs4_sequence_res seq_res;
};
#endif /* CONFIG_NFS_V4 */
+struct nfstime4 {
+ u64 seconds;
+ u32 nseconds;
+};
+
+#ifdef CONFIG_NFS_V4_1
+struct nfs_impl_id4 {
+ u32 domain_len;
+ char *domain;
+ u32 name_len;
+ char *name;
+ struct nfstime4 date;
+};
+
+#define NFS4_EXCHANGE_ID_LEN (48)
+struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+ nfs4_verifier *verifier;
+ unsigned int id_len;
+ char id[NFS4_EXCHANGE_ID_LEN];
+ u32 flags;
+};
+
+struct server_owner {
+ uint64_t minor_id;
+ uint32_t major_id_sz;
+ char major_id[NFS4_OPAQUE_LIMIT];
+};
+
+struct server_scope {
+ uint32_t server_scope_sz;
+ char server_scope[NFS4_OPAQUE_LIMIT];
+};
+
+struct nfs41_exchange_id_res {
+ struct nfs_client *client;
+ u32 flags;
+};
+
+struct nfs41_create_session_args {
+ struct nfs_client *client;
+ uint32_t flags;
+ uint32_t cb_program;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
+};
+
+struct nfs41_create_session_res {
+ struct nfs_client *client;
+};
+#endif /* CONFIG_NFS_V4_1 */
+
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
@@ -785,7 +988,7 @@ struct nfs_access_entry;
*/
struct nfs_rpc_ops {
u32 version; /* Protocol version */
- struct dentry_operations *dentry_ops;
+ const struct dentry_operations *dentry_ops;
const struct inode_operations *dir_inode_ops;
const struct inode_operations *file_inode_ops;
@@ -836,6 +1039,7 @@ struct nfs_rpc_ops {
int (*lock)(struct file *, int, struct file_lock *);
int (*lock_check_bounds)(const struct file_lock *);
void (*clear_acl_cache)(struct inode *);
+ void (*close_context)(struct nfs_open_context *ctx, int);
};
/*
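With the old NFS_ATTR_* flags replaced by per-field validity bits, callers are expected to test the specific bit before trusting a field in struct nfs_fattr. A hedged sketch; the helper names are illustrative:

/* Sketch: per-field validity checks against the new bitmask. */
static bool fattr_mtime_valid(const struct nfs_fattr *fattr)
{
        return (fattr->valid & NFS_ATTR_FATTR_MTIME) != 0;
}

static bool fattr_has_full_v4_set(const struct nfs_fattr *fattr)
{
        /* composite masks need every bit present, not just one */
        return (fattr->valid & NFS_ATTR_FATTR_V4) == NFS_ATTR_FATTR_V4;
}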
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 04b355c801d..3a3f58934f5 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -14,8 +14,7 @@
#include <linux/uio.h>
/*
- * Representation of a reply cache entry. The first two members *must*
- * be hash_next and hash_prev.
+ * Representation of a reply cache entry.
*/
struct svc_cacherep {
struct hlist_node c_hash;
@@ -76,4 +75,12 @@ void nfsd_reply_cache_shutdown(void);
int nfsd_cache_lookup(struct svc_rqst *, int);
void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
+#ifdef CONFIG_NFSD_V4
+void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
+#else /* CONFIG_NFSD_V4 */
+static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
+{
+}
+#endif /* CONFIG_NFSD_V4 */
+
#endif /* NFSCACHE_H */
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index bcd0201589f..a6d9ef2bb34 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -125,11 +125,9 @@ void nfsd_export_flush(void);
void exp_readlock(void);
void exp_readunlock(void);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
- struct vfsmount *,
- struct dentry *);
+ struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
- struct vfsmount *mnt,
- struct dentry *dentry);
+ struct path *);
int exp_rootfh(struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index e19f45991b2..2b49d676d0c 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -23,7 +23,7 @@
/*
* nfsd version
*/
-#define NFSD_SUPPORTED_MINOR_VERSION 0
+#define NFSD_SUPPORTED_MINOR_VERSION 1
/*
* Flags for nfsd_permission
@@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
extern struct svc_program nfsd_program;
extern struct svc_version nfsd_version2, nfsd_version3,
nfsd_version4;
+extern u32 nfsd_supported_minorversion;
extern struct mutex nfsd_mutex;
extern struct svc_serv *nfsd_serv;
@@ -105,7 +106,7 @@ void nfsd_close(struct file *);
__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
loff_t, struct kvec *, int, unsigned long *);
__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
- loff_t, struct kvec *,int, unsigned long, int *);
+ loff_t, struct kvec *,int, unsigned long *, int *);
__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
char *, int *);
__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
@@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
int nfsd_vers(int vers, enum vers_op change);
+int nfsd_minorversion(u32 minorversion, enum vers_op change);
void nfsd_reset_versions(void);
int nfsd_create_serv(void);
@@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void);
/*
* These macros provide pre-xdr'ed values for faster operation.
*/
-#define nfs_ok __constant_htonl(NFS_OK)
-#define nfserr_perm __constant_htonl(NFSERR_PERM)
-#define nfserr_noent __constant_htonl(NFSERR_NOENT)
-#define nfserr_io __constant_htonl(NFSERR_IO)
-#define nfserr_nxio __constant_htonl(NFSERR_NXIO)
-#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN)
-#define nfserr_acces __constant_htonl(NFSERR_ACCES)
-#define nfserr_exist __constant_htonl(NFSERR_EXIST)
-#define nfserr_xdev __constant_htonl(NFSERR_XDEV)
-#define nfserr_nodev __constant_htonl(NFSERR_NODEV)
-#define nfserr_notdir __constant_htonl(NFSERR_NOTDIR)
-#define nfserr_isdir __constant_htonl(NFSERR_ISDIR)
-#define nfserr_inval __constant_htonl(NFSERR_INVAL)
-#define nfserr_fbig __constant_htonl(NFSERR_FBIG)
-#define nfserr_nospc __constant_htonl(NFSERR_NOSPC)
-#define nfserr_rofs __constant_htonl(NFSERR_ROFS)
-#define nfserr_mlink __constant_htonl(NFSERR_MLINK)
-#define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP)
-#define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG)
-#define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY)
-#define nfserr_dquot __constant_htonl(NFSERR_DQUOT)
-#define nfserr_stale __constant_htonl(NFSERR_STALE)
-#define nfserr_remote __constant_htonl(NFSERR_REMOTE)
-#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH)
-#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE)
-#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC)
-#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE)
-#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP)
-#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL)
-#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT)
-#define nfserr_badtype __constant_htonl(NFSERR_BADTYPE)
-#define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX)
-#define nfserr_denied __constant_htonl(NFSERR_DENIED)
-#define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK)
-#define nfserr_expired __constant_htonl(NFSERR_EXPIRED)
-#define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE)
-#define nfserr_same __constant_htonl(NFSERR_SAME)
-#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE)
-#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID)
-#define nfserr_resource __constant_htonl(NFSERR_RESOURCE)
-#define nfserr_moved __constant_htonl(NFSERR_MOVED)
-#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE)
-#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH)
-#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED)
-#define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID)
-#define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID)
-#define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID)
-#define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID)
-#define nfserr_symlink __constant_htonl(NFSERR_SYMLINK)
-#define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME)
-#define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH)
-#define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP)
-#define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR)
-#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE)
-#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD)
-#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL)
-#define nfserr_grace __constant_htonl(NFSERR_GRACE)
-#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE)
-#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD)
-#define nfserr_badname __constant_htonl(NFSERR_BADNAME)
-#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN)
-#define nfserr_locked __constant_htonl(NFSERR_LOCKED)
-#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC)
-#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME)
+#define nfs_ok cpu_to_be32(NFS_OK)
+#define nfserr_perm cpu_to_be32(NFSERR_PERM)
+#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
+#define nfserr_io cpu_to_be32(NFSERR_IO)
+#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
+#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
+#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
+#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
+#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
+#define nfserr_nodev cpu_to_be32(NFSERR_NODEV)
+#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR)
+#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR)
+#define nfserr_inval cpu_to_be32(NFSERR_INVAL)
+#define nfserr_fbig cpu_to_be32(NFSERR_FBIG)
+#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
+#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
+#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
+#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
+#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
+#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
+#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
+#define nfserr_stale cpu_to_be32(NFSERR_STALE)
+#define nfserr_remote cpu_to_be32(NFSERR_REMOTE)
+#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH)
+#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE)
+#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC)
+#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE)
+#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP)
+#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL)
+#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT)
+#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE)
+#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX)
+#define nfserr_denied cpu_to_be32(NFSERR_DENIED)
+#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK)
+#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED)
+#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE)
+#define nfserr_same cpu_to_be32(NFSERR_SAME)
+#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE)
+#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID)
+#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE)
+#define nfserr_moved cpu_to_be32(NFSERR_MOVED)
+#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE)
+#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH)
+#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED)
+#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID)
+#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID)
+#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID)
+#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID)
+#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK)
+#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME)
+#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH)
+#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
+#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
+#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
+#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
+#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
+#define nfserr_grace cpu_to_be32(NFSERR_GRACE)
+#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
+#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
+#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
+#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
+#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
+#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
+#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
+#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
+#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
+#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION)
+#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT)
+#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY)
+#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
+#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED)
+#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY)
+#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER)
+#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE)
+#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT)
+#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT)
+#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE)
+#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED)
+#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS)
+#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG)
+#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG)
+#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE)
+#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP)
+#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND)
+#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS)
+#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION)
+#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP)
+#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY)
+#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE)
+#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY)
+#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT)
+#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION)
+#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP)
+#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT)
+#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP)
+#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED)
+#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE)
+#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL)
+#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG)
+#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT)
+#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED)
/* error codes for internal use */
/* if a request fails due to kmalloc failure, it gets dropped.
* Client should resend eventually
*/
-#define nfserr_dropit __constant_htonl(30000)
+#define nfserr_dropit cpu_to_be32(30000)
/* end-of-file indicator in readdir */
-#define nfserr_eof __constant_htonl(30001)
+#define nfserr_eof cpu_to_be32(30001)
+/* replay detected */
+#define nfserr_replay_me cpu_to_be32(11001)
+/* nfs41 replay detected */
+#define nfserr_replay_cache cpu_to_be32(11002)
/* Check for dir entries '.' and '..' */
#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
@@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot;
* TIME_BACKUP (unlikely to be supported any time soon)
* TIME_CREATE (unlikely to be supported any time soon)
*/
-#define NFSD_SUPPORTED_ATTRS_WORD0 \
+#define NFSD4_SUPPORTED_ATTRS_WORD0 \
(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
| FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \
| FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \
@@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot;
| FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
| FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
-#define NFSD_SUPPORTED_ATTRS_WORD1 \
+#define NFSD4_SUPPORTED_ATTRS_WORD1 \
(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
| FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
| FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
@@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot;
| FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
| FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
+#define NFSD4_SUPPORTED_ATTRS_WORD2 0
+
+#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
+ NFSD4_SUPPORTED_ATTRS_WORD0
+
+#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
+ NFSD4_SUPPORTED_ATTRS_WORD1
+
+#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
+ (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
+
+static inline u32 nfsd_suppattrs0(u32 minorversion)
+{
+ return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
+ : NFSD4_SUPPORTED_ATTRS_WORD0;
+}
+
+static inline u32 nfsd_suppattrs1(u32 minorversion)
+{
+ return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
+ : NFSD4_SUPPORTED_ATTRS_WORD1;
+}
+
+static inline u32 nfsd_suppattrs2(u32 minorversion)
+{
+ return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
+ : NFSD4_SUPPORTED_ATTRS_WORD2;
+}
+
/* These will return ERR_INVAL if specified in GETATTR or READDIR. */
#define NFSD_WRITEONLY_ATTRS_WORD1 \
(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
@@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot;
#define NFSD_WRITEABLE_ATTRS_WORD1 \
(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
| FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
+#define NFSD_WRITEABLE_ATTRS_WORD2 0
+
+#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
+ NFSD_WRITEABLE_ATTRS_WORD0
+/*
+ * we currently store the exclusive create verifier in the v_{a,m}time
+ * attributes so the client can't set these at create time using EXCLUSIVE4_1
+ */
+#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \
+ (NFSD_WRITEABLE_ATTRS_WORD1 & \
+ ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET))
+#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
+ NFSD_WRITEABLE_ATTRS_WORD2
#endif /* CONFIG_NFSD_V4 */
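The nfserr_* values above remain compile-time constants after the switch to cpu_to_be32(), so they can be returned and compared as __be32 with no run-time byte swapping. A small hedged illustration; the function is hypothetical:

/* Sketch: pre-encoded status values flow straight to the wire as __be32. */
static __be32 check_access(int ok)
{
        if (!ok)
                return nfserr_acces;    /* already in big-endian wire form */
        return nfs_ok;
}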
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index fa317f6c154..8f641c90845 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -151,9 +151,15 @@ typedef struct svc_fh {
__u64 fh_pre_size; /* size before operation */
struct timespec fh_pre_mtime; /* mtime before oper */
struct timespec fh_pre_ctime; /* ctime before oper */
+ /*
+ * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode)
+ * to find out if it is valid.
+ */
+ u64 fh_pre_change;
/* Post-op attributes saved in fh_unlock */
struct kstat fh_post_attr; /* full attrs after operation */
+ u64 fh_post_change; /* nfsv4 change; see above */
#endif /* CONFIG_NFSD_V3 */
} svc_fh;
@@ -269,6 +275,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src)
return dst;
}
+static inline void
+fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
+{
+ dst->fh_size = src->fh_size;
+ memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
+}
+
static __inline__ struct svc_fh *
fh_init(struct svc_fh *fhp, int maxsize)
{
@@ -291,6 +304,7 @@ fill_pre_wcc(struct svc_fh *fhp)
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
fhp->fh_pre_size = inode->i_size;
+ fhp->fh_pre_change = inode->i_version;
fhp->fh_pre_saved = 1;
}
}
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 128298c0362..57ab2ed0845 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -41,7 +41,6 @@
#include <linux/kref.h>
#include <linux/sunrpc/clnt.h>
-#define NFS4_OPAQUE_LIMIT 1024
typedef struct {
u32 cl_boot;
u32 cl_id;
@@ -61,16 +60,6 @@ typedef struct {
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
-
-struct nfs4_cb_recall {
- u32 cbr_ident;
- int cbr_trunc;
- stateid_t cbr_stateid;
- u32 cbr_fhlen;
- char cbr_fhval[NFS4_FHSIZE];
- struct nfs4_delegation *cbr_dp;
-};
-
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -82,23 +71,91 @@ struct nfs4_delegation {
struct file *dl_vfs_file;
u32 dl_type;
time_t dl_time;
- struct nfs4_cb_recall dl_recall;
+/* For recall: */
+ u32 dl_ident;
+ stateid_t dl_stateid;
+ struct knfsd_fh dl_fh;
+ int dl_retries;
};
-#define dl_stateid dl_recall.cbr_stateid
-#define dl_fhlen dl_recall.cbr_fhlen
-#define dl_fhval dl_recall.cbr_fhval
-
/* client delegation callback info */
-struct nfs4_callback {
+struct nfs4_cb_conn {
/* SETCLIENTID info */
u32 cb_addr;
unsigned short cb_port;
u32 cb_prog;
- u32 cb_ident;
+ u32 cb_minorversion;
+ u32 cb_ident; /* minorversion 0 only */
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
struct rpc_clnt * cb_client;
+ struct rpc_cred * cb_cred;
+};
+
+/* Maximum number of slots per session. 128 is useful for long haul TCP */
+#define NFSD_MAX_SLOTS_PER_SESSION 128
+/* Maximum number of pages per slot cache entry */
+#define NFSD_PAGES_PER_SLOT 1
+/* Maximum number of operations per session compound */
+#define NFSD_MAX_OPS_PER_COMPOUND 16
+
+struct nfsd4_cache_entry {
+ __be32 ce_status;
+ struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */
+ struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1];
+ int ce_cachethis;
+ short ce_resused;
+ int ce_opcnt;
+ int ce_rpchdrlen;
+};
+
+struct nfsd4_slot {
+ bool sl_inuse;
+ u32 sl_seqid;
+ struct nfsd4_cache_entry sl_cache_entry;
+};
+
+struct nfsd4_channel_attrs {
+ u32 headerpadsz;
+ u32 maxreq_sz;
+ u32 maxresp_sz;
+ u32 maxresp_cached;
+ u32 maxops;
+ u32 maxreqs;
+ u32 nr_rdma_attrs;
+ u32 rdma_attrs;
+};
+
+struct nfsd4_session {
+ struct kref se_ref;
+ struct list_head se_hash; /* hash by sessionid */
+ struct list_head se_perclnt;
+ u32 se_flags;
+ struct nfs4_client *se_client; /* for expire_client */
+ struct nfs4_sessionid se_sessionid;
+ struct nfsd4_channel_attrs se_fchannel;
+ struct nfsd4_channel_attrs se_bchannel;
+ struct nfsd4_slot se_slots[]; /* forward channel slots */
+};
+
+static inline void
+nfsd4_put_session(struct nfsd4_session *ses)
+{
+ extern void free_session(struct kref *kref);
+ kref_put(&ses->se_ref, free_session);
+}
+
+static inline void
+nfsd4_get_session(struct nfsd4_session *ses)
+{
+ kref_get(&ses->se_ref);
+}
+
+/* formatted contents of nfs4_sessionid */
+struct nfsd4_sessionid {
+ clientid_t clientid;
+ u32 sequence;
+ u32 reserved;
};
#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
@@ -129,9 +186,15 @@ struct nfs4_client {
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_callback cl_callback; /* callback info */
+ struct nfs4_cb_conn cl_cb_conn; /* callback info */
atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
+
+ /* for nfs41 */
+ struct list_head cl_sessions;
+ struct nfsd4_slot cl_slot; /* create_session slot */
+ u32 cl_exchange_flags;
+ struct nfs4_sessionid cl_sessionid;
};
/* struct nfs4_client_reset
@@ -168,8 +231,7 @@ struct nfs4_replay {
unsigned int rp_buflen;
char *rp_buf;
unsigned int rp_allocated;
- int rp_openfh_len;
- char rp_openfh[NFS4_FHSIZE];
+ struct knfsd_fh rp_openfh;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
@@ -217,7 +279,7 @@ struct nfs4_stateowner {
* share_access, share_deny on the file.
*/
struct nfs4_file {
- struct kref fi_ref;
+ atomic_t fi_ref;
struct list_head fi_hash; /* hash by "struct inode *" */
struct list_head fi_stateids;
struct list_head fi_delegations;
@@ -259,14 +321,13 @@ struct nfs4_stateid {
};
/* flags for preprocess_seqid_op() */
-#define CHECK_FH 0x00000001
+#define HAS_SESSION 0x00000001
#define CONFIRM 0x00000002
#define OPEN_STATE 0x00000004
#define LOCK_STATE 0x00000008
#define RD_STATE 0x00000010
#define WR_STATE 0x00000020
#define CLOSE_STATE 0x00000040
-#define DELEG_RET 0x00000080
#define seqid_mutating_err(err) \
(((err) != nfserr_stale_clientid) && \
@@ -274,7 +335,9 @@ struct nfs4_stateid {
((err) != nfserr_stale_stateid) && \
((err) != nfserr_bad_stateid))
-extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh,
+struct nfsd4_compound_state;
+
+extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filp);
extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
@@ -290,7 +353,7 @@ extern void nfsd4_init_recdir(char *recdir_name);
extern int nfsd4_recdir_load(void);
extern void nfsd4_shutdown_recdir(void);
extern int nfs4_client_to_reclaim(const char *name);
-extern int nfs4_has_reclaimed_state(const char *name);
+extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
extern void nfsd4_recdir_purge_old(void);
extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h
index 7678cfbe996..2693ef647df 100644
--- a/include/linux/nfsd/stats.h
+++ b/include/linux/nfsd/stats.h
@@ -11,6 +11,11 @@
#include <linux/nfs4.h>
+/* thread usage wraps every million seconds (approx one fortnight) */
+#define NFSD_USAGE_WRAP (HZ*1000000)
+
+#ifdef __KERNEL__
+
struct nfsd_stats {
unsigned int rchits; /* repcache hits */
unsigned int rcmisses; /* repcache misses */
@@ -35,10 +40,6 @@ struct nfsd_stats {
};
-/* thread usage wraps very million seconds (approx one fortnight) */
-#define NFSD_USAGE_WRAP (HZ*1000000)
-
-#ifdef __KERNEL__
extern struct nfsd_stats nfsdstats;
extern struct svc_stat nfsd_svcstats;
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 27bd3e38ec5..2bacf753506 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -45,17 +45,32 @@
#define XDR_LEN(n) (((n) + 3) & ~3)
struct nfsd4_compound_state {
- struct svc_fh current_fh;
- struct svc_fh save_fh;
- struct nfs4_stateowner *replay_owner;
-};
+ struct svc_fh current_fh;
+ struct svc_fh save_fh;
+ struct nfs4_stateowner *replay_owner;
+ /* For sessions DRC */
+ struct nfsd4_session *session;
+ struct nfsd4_slot *slot;
+ __be32 *statp;
+ size_t iovlen;
+ u32 minorversion;
+ u32 status;
+};
+
+static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
+{
+ return cs->slot != NULL;
+}
struct nfsd4_change_info {
u32 atomic;
+ bool change_supported;
u32 before_ctime_sec;
u32 before_ctime_nsec;
+ u64 before_change;
u32 after_ctime_sec;
u32 after_ctime_nsec;
+ u64 after_change;
};
struct nfsd4_access {
@@ -90,7 +105,7 @@ struct nfsd4_create {
u32 specdata2;
} dev; /* NF4BLK, NF4CHR */
} u;
- u32 cr_bmval[2]; /* request */
+ u32 cr_bmval[3]; /* request */
struct iattr cr_iattr; /* request */
struct nfsd4_change_info cr_cinfo; /* response */
struct nfs4_acl *cr_acl;
@@ -105,7 +120,7 @@ struct nfsd4_delegreturn {
};
struct nfsd4_getattr {
- u32 ga_bmval[2]; /* request */
+ u32 ga_bmval[3]; /* request */
struct svc_fh *ga_fhp; /* response */
};
@@ -206,11 +221,9 @@ struct nfsd4_open {
stateid_t op_delegate_stateid; /* request - response */
u32 op_create; /* request */
u32 op_createmode; /* request */
- u32 op_bmval[2]; /* request */
- union { /* request */
- struct iattr iattr; /* UNCHECKED4,GUARDED4 */
- nfs4_verifier verf; /* EXCLUSIVE4 */
- } u;
+ u32 op_bmval[3]; /* request */
+ struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
+ nfs4_verifier verf; /* EXCLUSIVE4 */
clientid_t op_clientid; /* request */
struct xdr_netobj op_owner; /* request */
u32 op_seqid; /* request */
@@ -224,8 +237,8 @@ struct nfsd4_open {
struct nfs4_stateowner *op_stateowner; /* used during processing */
struct nfs4_acl *op_acl;
};
-#define op_iattr u.iattr
-#define op_verf u.verf
+#define op_iattr iattr
+#define op_verf verf
struct nfsd4_open_confirm {
stateid_t oc_req_stateid /* request */;
@@ -259,7 +272,7 @@ struct nfsd4_readdir {
nfs4_verifier rd_verf; /* request */
u32 rd_dircount; /* request */
u32 rd_maxcount; /* request */
- u32 rd_bmval[2]; /* request */
+ u32 rd_bmval[3]; /* request */
struct svc_rqst *rd_rqstp; /* response */
struct svc_fh * rd_fhp; /* response */
@@ -301,7 +314,7 @@ struct nfsd4_secinfo {
struct nfsd4_setattr {
stateid_t sa_stateid; /* request */
- u32 sa_bmval[2]; /* request */
+ u32 sa_bmval[3]; /* request */
struct iattr sa_iattr; /* request */
struct nfs4_acl *sa_acl;
};
@@ -327,7 +340,7 @@ struct nfsd4_setclientid_confirm {
/* also used for NVERIFY */
struct nfsd4_verify {
- u32 ve_bmval[2]; /* request */
+ u32 ve_bmval[3]; /* request */
u32 ve_attrlen; /* request */
char * ve_attrval; /* request */
};
@@ -344,6 +357,43 @@ struct nfsd4_write {
nfs4_verifier wr_verifier; /* response */
};
+struct nfsd4_exchange_id {
+ nfs4_verifier verifier;
+ struct xdr_netobj clname;
+ u32 flags;
+ clientid_t clientid;
+ u32 seqid;
+ int spa_how;
+};
+
+struct nfsd4_create_session {
+ clientid_t clientid;
+ struct nfs4_sessionid sessionid;
+ u32 seqid;
+ u32 flags;
+ struct nfsd4_channel_attrs fore_channel;
+ struct nfsd4_channel_attrs back_channel;
+ u32 callback_prog;
+ u32 uid;
+ u32 gid;
+};
+
+struct nfsd4_sequence {
+ struct nfs4_sessionid sessionid; /* request/response */
+ u32 seqid; /* request/response */
+ u32 slotid; /* request/response */
+ u32 maxslots; /* request/response */
+ u32 cachethis; /* request */
+#if 0
+ u32 target_maxslots; /* response */
+ u32 status_flags; /* response */
+#endif /* not yet */
+};
+
+struct nfsd4_destroy_session {
+ struct nfs4_sessionid sessionid;
+};
+
struct nfsd4_op {
int opnum;
__be32 status;
@@ -378,6 +428,12 @@ struct nfsd4_op {
struct nfsd4_verify verify;
struct nfsd4_write write;
struct nfsd4_release_lockowner release_lockowner;
+
+ /* NFSv4.1 */
+ struct nfsd4_exchange_id exchange_id;
+ struct nfsd4_create_session create_session;
+ struct nfsd4_destroy_session destroy_session;
+ struct nfsd4_sequence sequence;
} u;
struct nfs4_replay * replay;
};
@@ -416,9 +472,22 @@ struct nfsd4_compoundres {
u32 taglen;
char * tag;
u32 opcnt;
- __be32 * tagp; /* where to encode tag and opcount */
+ __be32 * tagp; /* tag, opcount encode location */
+ struct nfsd4_compound_state cstate;
};
+static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
+{
+ struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
+ return args->opcnt == 1;
+}
+
+static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
+{
+ return !resp->cstate.slot->sl_cache_entry.ce_cachethis ||
+ nfsd4_is_solo_sequence(resp);
+}
+
#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
static inline void
@@ -426,10 +495,16 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
cinfo->atomic = 1;
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
+ if (cinfo->change_supported) {
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+ } else {
+ cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
+ cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
+ cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
+ cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ }
}
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
@@ -448,7 +523,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
struct nfsd4_compound_state *,
struct nfsd4_setclientid_confirm *setclientid_confirm);
-extern __be32 nfsd4_process_open1(struct nfsd4_open *open);
+extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
+extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
+ struct nfsd4_sequence *seq);
+extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *,
+struct nfsd4_exchange_id *);
+ extern __be32 nfsd4_create_session(struct svc_rqst *,
+ struct nfsd4_compound_state *,
+ struct nfsd4_create_session *);
+extern __be32 nfsd4_sequence(struct svc_rqst *,
+ struct nfsd4_compound_state *,
+ struct nfsd4_sequence *);
+extern __be32 nfsd4_destroy_session(struct svc_rqst *,
+ struct nfsd4_compound_state *,
+ struct nfsd4_destroy_session *);
+extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
+ struct nfsd4_open *open);
extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
struct svc_fh *current_fh, struct nfsd4_open *open);
extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
new file mode 100644
index 00000000000..79fec6af3f9
--- /dev/null
+++ b/include/linux/nilfs2_fs.h
@@ -0,0 +1,801 @@
+/*
+ * nilfs2_fs.h - NILFS2 on-disk structures and common declarations.
+ *
+ * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Written by Koji Sato <koji@osrg.net>
+ * Ryusuke Konishi <ryusuke@osrg.net>
+ */
+/*
+ * linux/include/linux/ext2_fs.h
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/include/linux/minix_fs.h
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#ifndef _LINUX_NILFS_FS_H
+#define _LINUX_NILFS_FS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Inode flags stored in nilfs_inode and on-memory nilfs inode
+ *
+ * We define these flags based on ext2-fs because of the
+ * compatibility reason; to avoid problems in chattr(1)
+ */
+#define NILFS_SECRM_FL 0x00000001 /* Secure deletion */
+#define NILFS_UNRM_FL 0x00000002 /* Undelete */
+#define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */
+#define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */
+#define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */
+#define NILFS_NODUMP_FL 0x00000040 /* do not dump file */
+#define NILFS_NOATIME_FL 0x00000080 /* do not update atime */
+/* Reserved for compression usage... */
+#define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
+#define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */
+
+#define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
+#define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
+
+
+#define NILFS_INODE_BMAP_SIZE 7
+/**
+ * struct nilfs_inode - structure of an inode on disk
+ * @i_blocks: blocks count
+ * @i_size: size in bytes
+ * @i_ctime: creation time (seconds)
+ * @i_mtime: modification time (seconds)
+ * @i_ctime_nsec: creation time (nano seconds)
+ * @i_mtime_nsec: modification time (nano seconds)
+ * @i_uid: user id
+ * @i_gid: group id
+ * @i_mode: file mode
+ * @i_links_count: links count
+ * @i_flags: file flags
+ * @i_bmap: block mapping
+ * @i_xattr: extended attributes
+ * @i_generation: file generation (for NFS)
+ * @i_pad: padding
+ */
+struct nilfs_inode {
+ __le64 i_blocks;
+ __le64 i_size;
+ __le64 i_ctime;
+ __le64 i_mtime;
+ __le32 i_ctime_nsec;
+ __le32 i_mtime_nsec;
+ __le32 i_uid;
+ __le32 i_gid;
+ __le16 i_mode;
+ __le16 i_links_count;
+ __le32 i_flags;
+ __le64 i_bmap[NILFS_INODE_BMAP_SIZE];
+#define i_device_code i_bmap[0]
+ __le64 i_xattr;
+ __le32 i_generation;
+ __le32 i_pad;
+};
+
+/**
+ * struct nilfs_super_root - structure of super root
+ * @sr_sum: check sum
+ * @sr_bytes: byte count of the structure
+ * @sr_flags: flags (reserved)
+ * @sr_nongc_ctime: write time of the last segment not for cleaner operation
+ * @sr_dat: DAT file inode
+ * @sr_cpfile: checkpoint file inode
+ * @sr_sufile: segment usage file inode
+ */
+struct nilfs_super_root {
+ __le32 sr_sum;
+ __le16 sr_bytes;
+ __le16 sr_flags;
+ __le64 sr_nongc_ctime;
+ struct nilfs_inode sr_dat;
+ struct nilfs_inode sr_cpfile;
+ struct nilfs_inode sr_sufile;
+};
+
+#define NILFS_SR_MDT_OFFSET(inode_size, i) \
+ ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \
+ (inode_size) * (i))
+#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0)
+#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1)
+#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2)
+#define NILFS_SR_BYTES (sizeof(struct nilfs_super_root))
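
As a quick sanity check (illustrative only, not part of the header), the offsets produced by these macros for the 128-byte nilfs_inode defined above:

#include <stddef.h>

static void super_root_layout_example(void)
{
	size_t isz = sizeof(struct nilfs_inode);	/* 128 bytes with the layout above */

	size_t dat    = NILFS_SR_DAT_OFFSET(isz);	/* 16: right after the 16-byte header */
	size_t cpfile = NILFS_SR_CPFILE_OFFSET(isz);	/* 144 */
	size_t sufile = NILFS_SR_SUFILE_OFFSET(isz);	/* 272 */

	(void)dat; (void)cpfile; (void)sufile;
}
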
+
+/*
+ * Maximal mount counts
+ */
+#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */
+
+/*
+ * File system states (sbp->s_state, nilfs->ns_mount_state)
+ */
+#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */
+#define NILFS_ERROR_FS 0x0002 /* Errors detected */
+#define NILFS_RESIZE_FS 0x0004 /* Resize required */
+
+/*
+ * Mount flags (sbi->s_mount_opt)
+ */
+#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */
+#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
+#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
+#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
+#define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */
+#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
+#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
+ semantics also for data */
+
+
+/**
+ * struct nilfs_super_block - structure of super block on disk
+ */
+struct nilfs_super_block {
+ __le32 s_rev_level; /* Revision level */
+ __le16 s_minor_rev_level; /* minor revision level */
+ __le16 s_magic; /* Magic signature */
+
+ __le16 s_bytes; /* Bytes count of CRC calculation
+ for this structure. s_reserved
+ is excluded. */
+ __le16 s_flags; /* flags */
+ __le32 s_crc_seed; /* Seed value of CRC calculation */
+ __le32 s_sum; /* Check sum of super block */
+
+ __le32 s_log_block_size; /* Block size represented as follows
+ blocksize =
+ 1 << (s_log_block_size + 10) */
+ __le64 s_nsegments; /* Number of segments in filesystem */
+ __le64 s_dev_size; /* block device size in bytes */
+ __le64 s_first_data_block; /* 1st seg disk block number */
+ __le32 s_blocks_per_segment; /* number of blocks per full segment */
+ __le32 s_r_segments_percentage; /* Reserved segments percentage */
+
+ __le64 s_last_cno; /* Last checkpoint number */
+ __le64 s_last_pseg; /* disk block addr pseg written last */
+ __le64 s_last_seq; /* seq. number of seg written last */
+ __le64 s_free_blocks_count; /* Free blocks count */
+
+ __le64 s_ctime; /* Creation time (execution time of
+ newfs) */
+ __le64 s_mtime; /* Mount time */
+ __le64 s_wtime; /* Write time */
+ __le16 s_mnt_count; /* Mount count */
+ __le16 s_max_mnt_count; /* Maximal mount count */
+ __le16 s_state; /* File system state */
+ __le16 s_errors; /* Behaviour when detecting errors */
+ __le64 s_lastcheck; /* time of last check */
+
+ __le32 s_checkinterval; /* max. time between checks */
+ __le32 s_creator_os; /* OS */
+ __le16 s_def_resuid; /* Default uid for reserved blocks */
+ __le16 s_def_resgid; /* Default gid for reserved blocks */
+ __le32 s_first_ino; /* First non-reserved inode */
+
+ __le16 s_inode_size; /* Size of an inode */
+ __le16 s_dat_entry_size; /* Size of a dat entry */
+ __le16 s_checkpoint_size; /* Size of a checkpoint */
+ __le16 s_segment_usage_size; /* Size of a segment usage */
+
+ __u8 s_uuid[16]; /* 128-bit uuid for volume */
+ char s_volume_name[16]; /* volume name */
+ char s_last_mounted[64]; /* directory where last mounted */
+
+ __le32 s_c_interval; /* Commit interval of segment */
+ __le32 s_c_block_max; /* Threshold of data amount for
+ the segment construction */
+ __u32 s_reserved[192]; /* padding to the end of the block */
+};
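
A minimal userspace-style sketch (assuming the structure has already been read from byte offset NILFS_SB_OFFSET_BYTES of the device; the helper name and le32toh() conversion are illustrative, not the kernel mount path):

#include <endian.h>

static unsigned long nilfs_sb_blocksize(const struct nilfs_super_block *sbp)
{
	/* blocksize = 1 << (s_log_block_size + 10): 0 -> 1024, 2 -> 4096 */
	return 1UL << (le32toh(sbp->s_log_block_size) + 10);
}
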
+
+/*
+ * Codes for operating systems
+ */
+#define NILFS_OS_LINUX 0
+/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */
+
+/*
+ * Revision levels
+ */
+#define NILFS_CURRENT_REV 2 /* current major revision */
+#define NILFS_MINOR_REV 0 /* minor revision */
+
+/*
+ * Bytes count of super_block for CRC-calculation
+ */
+#define NILFS_SB_BYTES \
+ ((long)&((struct nilfs_super_block *)0)->s_reserved)
+
+/*
+ * Special inode number
+ */
+#define NILFS_ROOT_INO 2 /* Root file inode */
+#define NILFS_DAT_INO 3 /* DAT file */
+#define NILFS_CPFILE_INO 4 /* checkpoint file */
+#define NILFS_SUFILE_INO 5 /* segment usage file */
+#define NILFS_IFILE_INO 6 /* ifile */
+#define NILFS_ATIME_INO 7 /* Atime file (reserved) */
+#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */
+#define NILFS_SKETCH_INO 10 /* Sketch file */
+#define NILFS_USER_INO 11 /* First user's file inode number */
+
+#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */
+#define NILFS_SUPER_MAGIC 0x3434 /* NILFS filesystem magic number */
+
+#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in
+ a full segment */
+#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in
+ a partial segment */
+#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved
+ segments */
+
+/*
+ * bytes offset of secondary super block
+ */
+#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12)
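
Worked example (illustrative, not part of the header): the macro rounds the device size down to a 4 KiB boundary and backs off one block, so the secondary super block sits in the last full 4 KiB block of the device.

static unsigned long long nilfs_sb2_offset_example(void)
{
	/* 1 GiB device: ((2^30 >> 12) - 1) << 12 == 2^30 - 4096 */
	return NILFS_SB2_OFFSET_BYTES(1073741824ULL);	/* == 1073737728 */
}
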
+
+/*
+ * Maximal count of links to a file
+ */
+#define NILFS_LINK_MAX 32000
+
+/*
+ * Structure of a directory entry
+ * (Same as ext2)
+ */
+
+#define NILFS_NAME_LEN 255
+
+/*
+ * The new version of the directory entry. Since V0 structures are
+ * stored in intel byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct nilfs_dir_entry {
+ __le64 inode; /* Inode number */
+ __le16 rec_len; /* Directory entry length */
+ __u8 name_len; /* Name length */
+ __u8 file_type;
+ char name[NILFS_NAME_LEN]; /* File name */
+ char pad;
+};
+
+/*
+ * NILFS directory file types. Only the low 3 bits are used. The
+ * other bits are reserved for now.
+ */
+enum {
+ NILFS_FT_UNKNOWN,
+ NILFS_FT_REG_FILE,
+ NILFS_FT_DIR,
+ NILFS_FT_CHRDEV,
+ NILFS_FT_BLKDEV,
+ NILFS_FT_FIFO,
+ NILFS_FT_SOCK,
+ NILFS_FT_SYMLINK,
+ NILFS_FT_MAX
+};
+
+/*
+ * NILFS_DIR_PAD defines the directory entries boundaries
+ *
+ * NOTE: It must be a multiple of 8
+ */
+#define NILFS_DIR_PAD 8
+#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1)
+#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \
+ ~NILFS_DIR_ROUND)
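
For illustration (not part of the header), a few values of the record-length macro above; 12 bytes is the fixed part of the entry and the result is rounded up to the 8-byte boundary:

static void nilfs_dir_rec_len_example(void)
{
	unsigned int a = NILFS_DIR_REC_LEN(1);			/* (1 + 12 + 7) & ~7 == 16 */
	unsigned int b = NILFS_DIR_REC_LEN(8);			/* 24 */
	unsigned int c = NILFS_DIR_REC_LEN(NILFS_NAME_LEN);	/* 272 */

	(void)a; (void)b; (void)c;
}
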
+
+
+/**
+ * struct nilfs_finfo - file information
+ * @fi_ino: inode number
+ * @fi_cno: checkpoint number
+ * @fi_nblocks: number of blocks (including intermediate blocks)
+ * @fi_ndatablk: number of file data blocks
+ */
+struct nilfs_finfo {
+ __le64 fi_ino;
+ __le64 fi_cno;
+ __le32 fi_nblocks;
+ __le32 fi_ndatablk;
+ /* array of virtual block numbers */
+};
+
+/**
+ * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned
+ * @bi_vblocknr: virtual block number
+ * @bi_blkoff: block offset
+ */
+struct nilfs_binfo_v {
+ __le64 bi_vblocknr;
+ __le64 bi_blkoff;
+};
+
+/**
+ * struct nilfs_binfo_dat - information for the block which belongs to the DAT file
+ * @bi_blkoff: block offset
+ * @bi_level: level
+ * @bi_pad: padding
+ */
+struct nilfs_binfo_dat {
+ __le64 bi_blkoff;
+ __u8 bi_level;
+ __u8 bi_pad[7];
+};
+
+/**
+ * union nilfs_binfo - block information
+ * @bi_v: nilfs_binfo_v structure
+ * @bi_dat: nilfs_binfo_dat structure
+ */
+union nilfs_binfo {
+ struct nilfs_binfo_v bi_v;
+ struct nilfs_binfo_dat bi_dat;
+};
+
+/**
+ * struct nilfs_segment_summary - segment summary
+ * @ss_datasum: checksum of data
+ * @ss_sumsum: checksum of segment summary
+ * @ss_magic: magic number
+ * @ss_bytes: size of this structure in bytes
+ * @ss_flags: flags
+ * @ss_seq: sequence number
+ * @ss_create: creation timestamp
+ * @ss_next: next segment
+ * @ss_nblocks: number of blocks
+ * @ss_nfinfo: number of finfo structures
+ * @ss_sumbytes: total size of segment summary in bytes
+ * @ss_pad: padding
+ */
+struct nilfs_segment_summary {
+ __le32 ss_datasum;
+ __le32 ss_sumsum;
+ __le32 ss_magic;
+ __le16 ss_bytes;
+ __le16 ss_flags;
+ __le64 ss_seq;
+ __le64 ss_create;
+ __le64 ss_next;
+ __le32 ss_nblocks;
+ __le32 ss_nfinfo;
+ __le32 ss_sumbytes;
+ __le32 ss_pad;
+ /* array of finfo structures */
+};
+
+#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */
+
+/*
+ * Segment summary flags
+ */
+#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */
+#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */
+#define NILFS_SS_SR 0x0004 /* has super root */
+#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */
+#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
+
+/**
+ * struct nilfs_palloc_group_desc - block group descriptor
+ * @pg_nfrees: number of free entries in block group
+ */
+struct nilfs_palloc_group_desc {
+ __le32 pg_nfrees;
+};
+
+/**
+ * struct nilfs_dat_entry - disk address translation entry
+ * @de_blocknr: block number
+ * @de_start: start checkpoint number
+ * @de_end: end checkpoint number
+ * @de_rsv: reserved for future use
+ */
+struct nilfs_dat_entry {
+ __le64 de_blocknr;
+ __le64 de_start;
+ __le64 de_end;
+ __le64 de_rsv;
+};
+
+/**
+ * struct nilfs_dat_group_desc - block group descriptor
+ * @dg_nfrees: number of free virtual block numbers in block group
+ */
+struct nilfs_dat_group_desc {
+ __le32 dg_nfrees;
+};
+
+
+/**
+ * struct nilfs_snapshot_list - snapshot list
+ * @ssl_next: next checkpoint number on snapshot list
+ * @ssl_prev: previous checkpoint number on snapshot list
+ */
+struct nilfs_snapshot_list {
+ __le64 ssl_next;
+ __le64 ssl_prev;
+};
+
+/**
+ * struct nilfs_checkpoint - checkpoint structure
+ * @cp_flags: flags
+ * @cp_checkpoints_count: checkpoints count in a block
+ * @cp_snapshot_list: snapshot list
+ * @cp_cno: checkpoint number
+ * @cp_create: creation timestamp
+ * @cp_nblk_inc: number of blocks incremented by this checkpoint
+ * @cp_inodes_count: inodes count
+ * @cp_blocks_count: blocks count
+ * @cp_ifile_inode: inode of ifile
+ */
+struct nilfs_checkpoint {
+ __le32 cp_flags;
+ __le32 cp_checkpoints_count;
+ struct nilfs_snapshot_list cp_snapshot_list;
+ __le64 cp_cno;
+ __le64 cp_create;
+ __le64 cp_nblk_inc;
+ __le64 cp_inodes_count;
+ __le64 cp_blocks_count; /* Reserved (might be deleted) */
+
+ /* Do not change the byte offset of ifile inode.
+ To keep the compatibility of the disk format,
+ additional fields should be added behind cp_ifile_inode. */
+ struct nilfs_inode cp_ifile_inode;
+};
+
+/* checkpoint flags */
+enum {
+ NILFS_CHECKPOINT_SNAPSHOT,
+ NILFS_CHECKPOINT_INVALID,
+ NILFS_CHECKPOINT_SKETCH,
+ NILFS_CHECKPOINT_MINOR,
+};
+
+#define NILFS_CHECKPOINT_FNS(flag, name) \
+static inline void \
+nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
+{ \
+ cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+} \
+static inline void \
+nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
+{ \
+ cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
+ ~(1UL << NILFS_CHECKPOINT_##flag)); \
+} \
+static inline int \
+nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
+{ \
+ return !!(le32_to_cpu(cp->cp_flags) & \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+}
+
+NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot)
+NILFS_CHECKPOINT_FNS(INVALID, invalid)
+NILFS_CHECKPOINT_FNS(MINOR, minor)
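
The macro above generates set/clear/test helpers per flag; a small usage sketch (the combining helper below is invented for illustration, not part of the header):

static int nilfs_checkpoint_usable_snapshot(const struct nilfs_checkpoint *cp)
{
	/* a checkpoint is a usable snapshot if marked as one and not invalid */
	return nilfs_checkpoint_snapshot(cp) && !nilfs_checkpoint_invalid(cp);
}
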
+
+/**
+ * struct nilfs_cpinfo - checkpoint information
+ * @ci_flags: flags
+ * @ci_pad: padding
+ * @ci_cno: checkpoint number
+ * @ci_create: creation timestamp
+ * @ci_nblk_inc: number of blocks incremented by this checkpoint
+ * @ci_inodes_count: inodes count
+ * @ci_blocks_count: blocks count
+ * @ci_next: next checkpoint number in snapshot list
+ */
+struct nilfs_cpinfo {
+ __u32 ci_flags;
+ __u32 ci_pad;
+ __u64 ci_cno;
+ __u64 ci_create;
+ __u64 ci_nblk_inc;
+ __u64 ci_inodes_count;
+ __u64 ci_blocks_count;
+ __u64 ci_next;
+};
+
+#define NILFS_CPINFO_FNS(flag, name) \
+static inline int \
+nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \
+{ \
+ return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \
+}
+
+NILFS_CPINFO_FNS(SNAPSHOT, snapshot)
+NILFS_CPINFO_FNS(INVALID, invalid)
+NILFS_CPINFO_FNS(MINOR, minor)
+
+
+/**
+ * struct nilfs_cpfile_header - checkpoint file header
+ * @ch_ncheckpoints: number of checkpoints
+ * @ch_nsnapshots: number of snapshots
+ * @ch_snapshot_list: snapshot list
+ */
+struct nilfs_cpfile_header {
+ __le64 ch_ncheckpoints;
+ __le64 ch_nsnapshots;
+ struct nilfs_snapshot_list ch_snapshot_list;
+};
+
+#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \
+ ((sizeof(struct nilfs_cpfile_header) + \
+ sizeof(struct nilfs_checkpoint) - 1) / \
+ sizeof(struct nilfs_checkpoint))
+
+/**
+ * struct nilfs_segment_usage - segment usage
+ * @su_lastmod: last modified timestamp
+ * @su_nblocks: number of blocks in segment
+ * @su_flags: flags
+ */
+struct nilfs_segment_usage {
+ __le64 su_lastmod;
+ __le32 su_nblocks;
+ __le32 su_flags;
+};
+
+/* segment usage flag */
+enum {
+ NILFS_SEGMENT_USAGE_ACTIVE,
+ NILFS_SEGMENT_USAGE_DIRTY,
+ NILFS_SEGMENT_USAGE_ERROR,
+
+ /* ... */
+};
+
+#define NILFS_SEGMENT_USAGE_FNS(flag, name) \
+static inline void \
+nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
+{ \
+ su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
+ (1UL << NILFS_SEGMENT_USAGE_##flag));\
+} \
+static inline void \
+nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
+{ \
+ su->su_flags = \
+ cpu_to_le32(le32_to_cpu(su->su_flags) & \
+ ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
+} \
+static inline int \
+nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
+{ \
+ return !!(le32_to_cpu(su->su_flags) & \
+ (1UL << NILFS_SEGMENT_USAGE_##flag)); \
+}
+
+NILFS_SEGMENT_USAGE_FNS(ACTIVE, active)
+NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty)
+NILFS_SEGMENT_USAGE_FNS(ERROR, error)
+
+static inline void
+nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
+{
+ su->su_lastmod = cpu_to_le64(0);
+ su->su_nblocks = cpu_to_le32(0);
+ su->su_flags = cpu_to_le32(0);
+}
+
+static inline int
+nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
+{
+ return !le32_to_cpu(su->su_flags);
+}
+
+/**
+ * struct nilfs_sufile_header - segment usage file header
+ * @sh_ncleansegs: number of clean segments
+ * @sh_ndirtysegs: number of dirty segments
+ * @sh_last_alloc: last allocated segment number
+ */
+struct nilfs_sufile_header {
+ __le64 sh_ncleansegs;
+ __le64 sh_ndirtysegs;
+ __le64 sh_last_alloc;
+ /* ... */
+};
+
+#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \
+ ((sizeof(struct nilfs_sufile_header) + \
+ sizeof(struct nilfs_segment_usage) - 1) / \
+ sizeof(struct nilfs_segment_usage))
+
+/**
+ * struct nilfs_suinfo - segment usage information
+ * @sui_lastmod: last modified timestamp
+ * @sui_nblocks: number of blocks in segment
+ * @sui_flags: segment usage flags
+ */
+struct nilfs_suinfo {
+ __u64 sui_lastmod;
+ __u32 sui_nblocks;
+ __u32 sui_flags;
+};
+
+#define NILFS_SUINFO_FNS(flag, name) \
+static inline int \
+nilfs_suinfo_##name(const struct nilfs_suinfo *si) \
+{ \
+ return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \
+}
+
+NILFS_SUINFO_FNS(ACTIVE, active)
+NILFS_SUINFO_FNS(DIRTY, dirty)
+NILFS_SUINFO_FNS(ERROR, error)
+
+static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
+{
+ return !si->sui_flags;
+}
+
+/* ioctl */
+enum {
+ NILFS_CHECKPOINT,
+ NILFS_SNAPSHOT,
+};
+
+/**
+ * struct nilfs_cpmode - change checkpoint mode structure
+ * @cm_cno: checkpoint number
+ * @cm_mode: mode of checkpoint (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
+ * @cm_pad: padding
+ */
+struct nilfs_cpmode {
+ __u64 cm_cno;
+ __u32 cm_mode;
+ __u32 cm_pad;
+};
+
+/**
+ * struct nilfs_argv - argument vector
+ * @v_base: pointer (user address) to the data array
+ * @v_nmembs: number of members in the array
+ * @v_size: size of each member in bytes
+ * @v_flags: flags
+ * @v_index: start number of target data items
+ */
+struct nilfs_argv {
+ __u64 v_base;
+ __u32 v_nmembs; /* number of members */
+ __u16 v_size; /* size of members */
+ __u16 v_flags;
+ __u64 v_index;
+};
+
+/**
+ * struct nilfs_period - period of checkpoint numbers
+ * @p_start: start checkpoint number (inclusive)
+ * @p_end: end checkpoint number (exclusive)
+ */
+struct nilfs_period {
+ __u64 p_start;
+ __u64 p_end;
+};
+
+/**
+ * struct nilfs_cpstat - checkpoint statistics
+ * @cs_cno: checkpoint number
+ * @cs_ncps: number of checkpoints
+ * @cs_nsss: number of snapshots
+ */
+struct nilfs_cpstat {
+ __u64 cs_cno;
+ __u64 cs_ncps;
+ __u64 cs_nsss;
+};
+
+/**
+ * struct nilfs_sustat - segment usage statistics
+ * @ss_nsegs: number of segments
+ * @ss_ncleansegs: number of clean segments
+ * @ss_ndirtysegs: number of dirty segments
+ * @ss_ctime: creation time of the last segment
+ * @ss_nongc_ctime: creation time of the last segment not for GC
+ * @ss_prot_seq: least sequence number of segments which must not be reclaimed
+ */
+struct nilfs_sustat {
+ __u64 ss_nsegs;
+ __u64 ss_ncleansegs;
+ __u64 ss_ndirtysegs;
+ __u64 ss_ctime;
+ __u64 ss_nongc_ctime;
+ __u64 ss_prot_seq;
+};
+
+/**
+ * struct nilfs_vinfo - virtual block number information
+ * @vi_vblocknr: virtual block number
+ * @vi_start: start checkpoint number (inclusive)
+ * @vi_end: end checkpoint number (exclusive)
+ * @vi_blocknr: disk block number
+ */
+struct nilfs_vinfo {
+ __u64 vi_vblocknr;
+ __u64 vi_start;
+ __u64 vi_end;
+ __u64 vi_blocknr;
+};
+
+/**
+ * struct nilfs_vdesc - descriptor of a virtual block number
+ * (describes a block to be moved by NILFS_IOCTL_CLEAN_SEGMENTS)
+ */
+struct nilfs_vdesc {
+ __u64 vd_ino;
+ __u64 vd_cno;
+ __u64 vd_vblocknr;
+ struct nilfs_period vd_period;
+ __u64 vd_blocknr;
+ __u64 vd_offset;
+ __u32 vd_flags;
+ __u32 vd_pad;
+};
+
+/**
+ * struct nilfs_bdesc - descriptor of a disk block
+ * (used with NILFS_IOCTL_GET_BDESCS and NILFS_IOCTL_CLEAN_SEGMENTS)
+ */
+struct nilfs_bdesc {
+ __u64 bd_ino;
+ __u64 bd_oblocknr;
+ __u64 bd_blocknr;
+ __u64 bd_offset;
+ __u32 bd_level;
+ __u32 bd_pad;
+};
+
+#define NILFS_IOCTL_IDENT 'n'
+
+#define NILFS_IOCTL_CHANGE_CPMODE \
+ _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
+#define NILFS_IOCTL_DELETE_CHECKPOINT \
+ _IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
+#define NILFS_IOCTL_GET_CPINFO \
+ _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
+#define NILFS_IOCTL_GET_CPSTAT \
+ _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
+#define NILFS_IOCTL_GET_SUINFO \
+ _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
+#define NILFS_IOCTL_GET_SUSTAT \
+ _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
+#define NILFS_IOCTL_GET_VINFO \
+ _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
+#define NILFS_IOCTL_GET_BDESCS \
+ _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
+#define NILFS_IOCTL_CLEAN_SEGMENTS \
+ _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
+#define NILFS_IOCTL_SYNC \
+ _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
+#define NILFS_IOCTL_RESIZE \
+ _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
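
A userspace sketch of driving one of these ioctls (not taken from the nilfs utilities; it assumes any open descriptor inside the mounted file system works as the ioctl target):

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static int print_cpstat(const char *mountpoint)
{
	struct nilfs_cpstat cpstat;
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, NILFS_IOCTL_GET_CPSTAT, &cpstat) < 0) {
		close(fd);
		return -1;
	}
	printf("checkpoints: %llu, snapshots: %llu\n",
	       (unsigned long long)cpstat.cs_ncps,
	       (unsigned long long)cpstat.cs_nsss);
	close(fd);
	return 0;
}
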
+
+#endif /* _LINUX_NILFS_FS_H */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index e86ed59f9ad..dbea93b694e 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -7,7 +7,7 @@
* Copyright 2008 Michael Wu <flamingice@sourmilk.net>
* Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com>
* Copyright 2008 Michael Buesch <mb@bu3sch.de>
- * Copyright 2008 Luis R. Rodriguez <lrodriguez@atheros.com>
+ * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
*
@@ -25,6 +25,8 @@
*
*/
+#include <linux/types.h>
+
/**
* DOC: Station handling
*
@@ -46,8 +48,10 @@
* to get a list of all present wiphys.
* @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
* %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
- * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or
- * %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET.
+ * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ,
+ * %NL80211_ATTR_WIPHY_CHANNEL_TYPE, %NL80211_ATTR_WIPHY_RETRY_SHORT,
+ * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
+ * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD.
* @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
* or rename notification. Has attributes %NL80211_ATTR_WIPHY and
* %NL80211_ATTR_WIPHY_NAME.
@@ -72,11 +76,11 @@
*
* @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified
* by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC.
- * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT or
- * %NL80211_ATTR_KEY_THRESHOLD.
+ * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT,
+ * %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD.
* @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
- * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC and %NL80211_ATTR_KEY_CIPHER
- * attributes.
+ * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER,
+ * and %NL80211_ATTR_KEY_SEQ attributes.
* @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX
* or %NL80211_ATTR_MAC.
*
@@ -84,7 +88,7 @@
* %NL80211_CMD_NEW_BEACON message)
* @NL80211_CMD_SET_BEACON: set the beacon on an access point interface
* using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD,
- * %NL80211_BEACON_HEAD and %NL80211_BEACON_TAIL attributes.
+ * %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL attributes.
* @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface,
* parameters are like for %NL80211_CMD_SET_BEACON.
* @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it
@@ -113,6 +117,8 @@
* @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by
* %NL80211_ATTR_IFINDEX.
*
+ * @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set
+ * regulatory domain.
* @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
* after being queried by the kernel. CRDA replies by sending a regulatory
* domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
@@ -133,6 +139,109 @@
* @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the
* interface identified by %NL80211_ATTR_IFINDEX
*
+ * @NL80211_CMD_SET_MGMT_EXTRA_IE: Set extra IEs for management frames. The
+ * interface is identified with %NL80211_ATTR_IFINDEX and the management
+ * frame subtype with %NL80211_ATTR_MGMT_SUBTYPE. The extra IE data to be
+ * added to the end of the specified management frame is specified with
+ * %NL80211_ATTR_IE. If the command succeeds, the requested data will be
+ * added to all specified management frames generated by
+ * kernel/firmware/driver.
+ * Note: This command has been removed and it is only reserved at this
+ * point to avoid re-using existing command number. The functionality this
+ * command was planned for has been provided with cleaner design with the
+ * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN,
+ * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE,
+ * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE.
+ *
+ * @NL80211_CMD_GET_SCAN: get scan results
+ * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
+ * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
+ * NL80211_CMD_GET_SCAN and on the "scan" multicast group)
+ * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
+ * partial scan results may be available
+ *
+ * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
+ * has been changed and provides details of the request information
+ * that caused the change such as who initiated the regulatory request
+ * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx
+ * (%NL80211_ATTR_REG_ALPHA2) from which the request was made if
+ * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or
+ * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain
+ * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is
+ * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on
+ * to (%NL80211_ATTR_REG_ALPHA2).
+ * @NL80211_CMD_REG_BEACON_HINT: indicates to userspace that an AP beacon
+ * has been found while world roaming thus enabling active scan or
+ * any mode of operation that initiates TX (beacons) on a channel
+ * where we would not have been able to do either before. As an example
+ * if you are world roaming (regulatory domain set to world or if your
+ * driver is using a custom world roaming regulatory domain) and while
+ * doing a passive scan on the 5 GHz band you find an AP there (if not
+ * on a DFS channel) you will now be able to actively scan for that AP
+ * or use AP mode on your card on that same channel. Note that this will
+ * never be used for channels 1-11 on the 2 GHz band as they are always
+ * enabled world wide. This beacon hint is only sent if your device had
+ * either disabled active scanning or beaconing on a channel. We send to
+ * userspace the wiphy from which we removed a restriction
+ * (%NL80211_ATTR_WIPHY) and the channel on which this occurred
+ * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER)
+ * the beacon hint was processed.
+ *
+ * @NL80211_CMD_AUTHENTICATE: authentication request and notification.
+ * This command is used both as a command (request to authenticate) and
+ * as an event on the "mlme" multicast group indicating completion of the
+ * authentication process.
+ * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the
+ * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and
+ * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
+ * the SSID (mainly for association, but is included in authentication
+ * request, too, to help BSS selection). %NL80211_ATTR_WIPHY_FREQ is used
+ * to specify the frequency of the channel in MHz. %NL80211_ATTR_AUTH_TYPE
+ * is used to specify the authentication type. %NL80211_ATTR_IE is used to
+ * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs)
+ * to be added to the frame.
+ * When used as an event, this reports reception of an Authentication
+ * frame in station and IBSS modes when the local MLME processed the
+ * frame, i.e., it was for the local STA and was received in correct
+ * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the
+ * MLME SAP interface (kernel providing MLME, userspace SME). The
+ * included %NL80211_ATTR_FRAME attribute contains the management frame
+ * (including both the header and frame body, but not FCS). This event is
+ * also used to indicate if the authentication attempt timed out. In that
+ * case the %NL80211_ATTR_FRAME attribute is replaced with a
+ * %NL80211_ATTR_TIMED_OUT flag (and %NL80211_ATTR_MAC to indicate which
+ * pending authentication timed out).
+ * @NL80211_CMD_ASSOCIATE: association request and notification; like
+ * NL80211_CMD_AUTHENTICATE but for Association and Reassociation
+ * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request,
+ * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives).
+ * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like
+ * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to
+ * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication
+ * primitives).
+ * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like
+ * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to
+ * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives).
+ *
+ * @NL80211_CMD_MICHAEL_MIC_FAILURE: notification of a locally detected Michael
+ * MIC (part of TKIP) failure; sent on the "mlme" multicast group; the
+ * event includes %NL80211_ATTR_MAC to describe the source MAC address of
+ * the frame with invalid MIC, %NL80211_ATTR_KEY_TYPE to show the key
+ * type, %NL80211_ATTR_KEY_IDX to indicate the key identifier, and
+ * %NL80211_ATTR_KEY_SEQ to indicate the TSC value of the frame; this
+ * event matches with MLME-MICHAELMICFAILURE.indication() primitive
+ *
+ * @NL80211_CMD_JOIN_IBSS: Join a new IBSS -- given at least an SSID and a
+ * FREQ attribute (for the initial frequency if no peer can be found)
+ * and optionally a MAC (as BSSID) and FREQ_FIXED attribute if those
+ * should be fixed rather than automatically determined. Can only be
+ * executed on a network interface that is UP, and fixed BSSID/FREQ
+ * may be rejected. Another optional parameter is the beacon interval,
+ * given in the %NL80211_ATTR_BEACON_INTERVAL attribute, which if not
+ * given defaults to 100 TU (102.4ms).
+ * @NL80211_CMD_LEAVE_IBSS: Leave the IBSS -- no special arguments, the IBSS is
+ * determined by the network interface.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -178,6 +287,29 @@ enum nl80211_commands {
NL80211_CMD_GET_MESH_PARAMS,
NL80211_CMD_SET_MESH_PARAMS,
+ NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */,
+
+ NL80211_CMD_GET_REG,
+
+ NL80211_CMD_GET_SCAN,
+ NL80211_CMD_TRIGGER_SCAN,
+ NL80211_CMD_NEW_SCAN_RESULTS,
+ NL80211_CMD_SCAN_ABORTED,
+
+ NL80211_CMD_REG_CHANGE,
+
+ NL80211_CMD_AUTHENTICATE,
+ NL80211_CMD_ASSOCIATE,
+ NL80211_CMD_DEAUTHENTICATE,
+ NL80211_CMD_DISASSOCIATE,
+
+ NL80211_CMD_MICHAEL_MIC_FAILURE,
+
+ NL80211_CMD_REG_BEACON_HINT,
+
+ NL80211_CMD_JOIN_IBSS,
+ NL80211_CMD_LEAVE_IBSS,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -190,6 +322,13 @@ enum nl80211_commands {
* here
*/
#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
+#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE
+#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE
+#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE
+#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE
+#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE
+#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE
+#define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT
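
To make the command flow concrete, a userspace sketch using libnl-genl (assumed here; error handling trimmed) that resolves the nl80211 family and issues NL80211_CMD_TRIGGER_SCAN for one interface:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int trigger_scan(int ifindex)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	msg = nlmsg_alloc();
	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_TRIGGER_SCAN, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	err = nl_send_auto_complete(sk, msg);	/* results arrive on the "scan" mcast group */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}
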
/**
* enum nl80211_attrs - nl80211 netlink attributes
@@ -208,6 +347,18 @@ enum nl80211_commands {
* NL80211_CHAN_HT20 = HT20 only
* NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel
* NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel
+ * @NL80211_ATTR_WIPHY_RETRY_SHORT: TX retry limit for frames whose length is
+ * less than or equal to the RTS threshold; allowed range: 1..255;
+ * dot11ShortRetryLimit; u8
+ * @NL80211_ATTR_WIPHY_RETRY_LONG: TX retry limit for frames whose length is
+ * greater than the RTS threshold; allowed range: 1..255;
+ * dot11LongRetryLimit; u8
+ * @NL80211_ATTR_WIPHY_FRAG_THRESHOLD: fragmentation threshold, i.e., maximum
+ * length in octets for frames; allowed range: 256..8000, disable
+ * fragmentation with (u32)-1; dot11FragmentationThreshold; u32
+ * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length
+ * larger than or equal to this use RTS/CTS handshake); allowed range:
+ * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32
*
* @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
* @NL80211_ATTR_IFNAME: network interface name
@@ -231,7 +382,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_STA_AID: Association ID for the station (u16)
* @NL80211_ATTR_STA_FLAGS: flags, nested element with NLA_FLAG attributes of
- * &enum nl80211_sta_flags.
+ * &enum nl80211_sta_flags (deprecated, use %NL80211_ATTR_STA_FLAGS2)
* @NL80211_ATTR_STA_LISTEN_INTERVAL: listen interval as defined by
* IEEE 802.11 7.3.1.6 (u16).
* @NL80211_ATTR_STA_SUPPORTED_RATES: supported rates, array of supported
@@ -284,6 +435,82 @@ enum nl80211_commands {
* supported interface types, each a flag attribute with the number
* of the interface mode.
*
+ * @NL80211_ATTR_MGMT_SUBTYPE: Management frame subtype for
+ * %NL80211_CMD_SET_MGMT_EXTRA_IE.
+ *
+ * @NL80211_ATTR_IE: Information element(s) data (used, e.g., with
+ * %NL80211_CMD_SET_MGMT_EXTRA_IE).
+ *
+ * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with
+ * a single scan request, a wiphy attribute.
+ * @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements
+ * that can be added to a scan request
+ *
+ * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz)
+ * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive
+ * scanning and include a zero-length SSID (wildcard) for wildcard scan
+ * @NL80211_ATTR_SCAN_GENERATION: the scan generation increases whenever the
+ * scan result list changes (BSS expired or added) so that applications
+ * can verify that they got a single, consistent snapshot (when all dump
+ * messages carried the same generation number)
+ * @NL80211_ATTR_BSS: scan result BSS
+ *
+ * @NL80211_ATTR_REG_INITIATOR: indicates who requested the regulatory domain
+ * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_*
+ * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently
+ * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*)
+ *
+ * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies
+ * an array of command numbers (i.e. a mapping index to command number)
+ * that the driver for the given wiphy supports.
+ *
+ * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header
+ * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and
+ * NL80211_CMD_ASSOCIATE events
+ * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets)
+ * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type,
+ * represented as a u32
+ * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and
+ * %NL80211_CMD_DISASSOCIATE, u16
+ *
+ * @NL80211_ATTR_KEY_TYPE: Key Type, see &enum nl80211_key_type, represented as
+ * a u32
+ *
+ * @NL80211_ATTR_FREQ_BEFORE: A channel which has suffered a regulatory change
+ * due to considerations from a beacon hint. This attribute reflects
+ * the state of the channel _before_ the beacon hint processing. This
+ * attribute consists of a nested attribute containing
+ * NL80211_FREQUENCY_ATTR_*
+ * @NL80211_ATTR_FREQ_AFTER: A channel which has suffered a regulatory change
+ * due to considerations from a beacon hint. This attribute reflects
+ * the state of the channel _after_ the beacon hint processing. This
+ * attribute consists of a nested attribute containing
+ * NL80211_FREQUENCY_ATTR_*
+ *
+ * @NL80211_ATTR_CIPHER_SUITES: a set of u32 values indicating the supported
+ * cipher suites
+ *
+ * @NL80211_ATTR_FREQ_FIXED: a flag indicating the IBSS should not try to look
+ * for other networks on different channels
+ *
+ * @NL80211_ATTR_TIMED_OUT: a flag indicating that an operation timed out; this
+ * is used, e.g., with %NL80211_CMD_AUTHENTICATE event
+ *
+ * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is
+ * used for the association (&enum nl80211_mfp, represented as a u32);
+ * this attribute can be used
+ * with %NL80211_CMD_ASSOCIATE request
+ *
+ * @NL80211_ATTR_STA_FLAGS2: Attribute containing a
+ * &struct nl80211_sta_flag_update.
+ *
+ * @NL80211_ATTR_CONTROL_PORT: A flag indicating whether user space controls
+ * IEEE 802.1X port, i.e., sets/clears %NL80211_STA_FLAG_AUTHORIZED, in
+ * station mode. If the flag is included in %NL80211_CMD_ASSOCIATE
+ * request, the driver will assume that the port is unauthorized until
+ * authorized by user space. Otherwise, port is marked authorized by
+ * default in station mode.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -346,6 +573,52 @@ enum nl80211_attrs {
NL80211_ATTR_WIPHY_FREQ,
NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+ NL80211_ATTR_KEY_DEFAULT_MGMT,
+
+ NL80211_ATTR_MGMT_SUBTYPE,
+ NL80211_ATTR_IE,
+
+ NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
+
+ NL80211_ATTR_SCAN_FREQUENCIES,
+ NL80211_ATTR_SCAN_SSIDS,
+ NL80211_ATTR_SCAN_GENERATION,
+ NL80211_ATTR_BSS,
+
+ NL80211_ATTR_REG_INITIATOR,
+ NL80211_ATTR_REG_TYPE,
+
+ NL80211_ATTR_SUPPORTED_COMMANDS,
+
+ NL80211_ATTR_FRAME,
+ NL80211_ATTR_SSID,
+ NL80211_ATTR_AUTH_TYPE,
+ NL80211_ATTR_REASON_CODE,
+
+ NL80211_ATTR_KEY_TYPE,
+
+ NL80211_ATTR_MAX_SCAN_IE_LEN,
+ NL80211_ATTR_CIPHER_SUITES,
+
+ NL80211_ATTR_FREQ_BEFORE,
+ NL80211_ATTR_FREQ_AFTER,
+
+ NL80211_ATTR_FREQ_FIXED,
+
+
+ NL80211_ATTR_WIPHY_RETRY_SHORT,
+ NL80211_ATTR_WIPHY_RETRY_LONG,
+ NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
+ NL80211_ATTR_WIPHY_RTS_THRESHOLD,
+
+ NL80211_ATTR_TIMED_OUT,
+
+ NL80211_ATTR_USE_MFP,
+
+ NL80211_ATTR_STA_FLAGS2,
+
+ NL80211_ATTR_CONTROL_PORT,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -360,7 +633,15 @@ enum nl80211_attrs {
#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES
#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS
#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ
-#define NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET
+#define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE
+#define NL80211_ATTR_MGMT_SUBTYPE NL80211_ATTR_MGMT_SUBTYPE
+#define NL80211_ATTR_IE NL80211_ATTR_IE
+#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR
+#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE
+#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME
+#define NL80211_ATTR_SSID NL80211_ATTR_SSID
+#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE
+#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_REG_RULES 32
@@ -412,12 +693,14 @@ enum nl80211_iftype {
* @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames
* with short barker preamble
* @NL80211_STA_FLAG_WME: station is WME/QoS capable
+ * @NL80211_STA_FLAG_MFP: station uses management frame protection
*/
enum nl80211_sta_flags {
__NL80211_STA_FLAG_INVALID,
NL80211_STA_FLAG_AUTHORIZED,
NL80211_STA_FLAG_SHORT_PREAMBLE,
NL80211_STA_FLAG_WME,
+ NL80211_STA_FLAG_MFP,
/* keep last */
__NL80211_STA_FLAG_AFTER_LAST,
@@ -425,6 +708,18 @@ enum nl80211_sta_flags {
};
/**
+ * struct nl80211_sta_flag_update - station flags mask/set
+ * @mask: mask of station flags to set
+ * @set: which values to set them to
+ *
+ * Both mask and set contain bits as per &enum nl80211_sta_flags.
+ */
+struct nl80211_sta_flag_update {
+ __u32 mask;
+ __u32 set;
+} __attribute__((packed));
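
A short sketch of filling this structure into an %NL80211_ATTR_STA_FLAGS2 attribute (libnl's nla_put() assumed; helper name is illustrative):

static int mark_sta_authorized(struct nl_msg *msg)
{
	struct nl80211_sta_flag_update upd = {
		.mask = 1 << NL80211_STA_FLAG_AUTHORIZED,	/* only touch this flag */
		.set  = 1 << NL80211_STA_FLAG_AUTHORIZED,	/* ...and set it */
	};

	return nla_put(msg, NL80211_ATTR_STA_FLAGS2, sizeof(upd), &upd);
}
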
+
+/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -465,6 +760,9 @@ enum nl80211_rate_info {
* @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
* @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
* containing info as possible, see &enum nl80211_sta_info_txrate.
+ * @NL80211_STA_INFO_RX_PACKETS: total received packets (u32, from this station)
+ * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this
+ * station)
*/
enum nl80211_sta_info {
__NL80211_STA_INFO_INVALID,
@@ -476,6 +774,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_PLINK_STATE,
NL80211_STA_INFO_SIGNAL,
NL80211_STA_INFO_TX_BITRATE,
+ NL80211_STA_INFO_RX_PACKETS,
+ NL80211_STA_INFO_TX_PACKETS,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -607,6 +907,48 @@ enum nl80211_bitrate_attr {
};
/**
+ * enum nl80211_initiator - Indicates the initiator of a reg domain request
+ * @NL80211_REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world
+ * regulatory domain.
+ * @NL80211_REGDOM_SET_BY_USER: User asked the wireless core to set the
+ * regulatory domain.
+ * @NL80211_REGDOM_SET_BY_DRIVER: a wireless drivers has hinted to the
+ * wireless core it thinks its knows the regulatory domain we should be in.
+ * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an
+ * 802.11 country information element with regulatory information it
+ * thinks we should consider.
+ */
+enum nl80211_reg_initiator {
+ NL80211_REGDOM_SET_BY_CORE,
+ NL80211_REGDOM_SET_BY_USER,
+ NL80211_REGDOM_SET_BY_DRIVER,
+ NL80211_REGDOM_SET_BY_COUNTRY_IE,
+};
+
+/**
+ * enum nl80211_reg_type - specifies the type of regulatory domain
+ * @NL80211_REGDOM_TYPE_COUNTRY: the regulatory domain set is one that pertains
+ * to a specific country. When this is set you can count on the
+ * ISO / IEC 3166 alpha2 country code being valid.
+ * @NL80211_REGDOM_TYPE_WORLD: the regulatory domain set is the world regulatory
+ * domain.
+ * @NL80211_REGDOM_TYPE_CUSTOM_WORLD: the regulatory domain set is a custom
+ * driver specific world regulatory domain. These do not apply system-wide
+ * and are only applicable to the individual devices which have requested
+ * them to be applied.
+ * @NL80211_REGDOM_TYPE_INTERSECTION: the regulatory domain set is the product
+ * of an intersection between two regulatory domains -- the previously
+ * set regulatory domain on the system and the last accepted regulatory
+ * domain request to be processed.
+ */
+enum nl80211_reg_type {
+ NL80211_REGDOM_TYPE_COUNTRY,
+ NL80211_REGDOM_TYPE_WORLD,
+ NL80211_REGDOM_TYPE_CUSTOM_WORLD,
+ NL80211_REGDOM_TYPE_INTERSECTION,
+};
+
+/**
* enum nl80211_reg_rule_attr - regulatory rule attributes
* @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
* considerations for a given frequency range. These are the
@@ -811,4 +1153,75 @@ enum nl80211_channel_type {
NL80211_CHAN_HT40MINUS,
NL80211_CHAN_HT40PLUS
};
+
+/**
+ * enum nl80211_bss - netlink attributes for a BSS
+ *
+ * @__NL80211_BSS_INVALID: invalid
+ * @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
+ * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
+ * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
+ * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
+ * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
+ * raw information elements from the probe response/beacon (bin)
+ * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon
+ * in mBm (100 * dBm) (s32)
+ * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon
+ * in unspecified units, scaled to 0..100 (u8)
+ * @__NL80211_BSS_AFTER_LAST: internal
+ * @NL80211_BSS_MAX: highest BSS attribute
+ */
+enum nl80211_bss {
+ __NL80211_BSS_INVALID,
+ NL80211_BSS_BSSID,
+ NL80211_BSS_FREQUENCY,
+ NL80211_BSS_TSF,
+ NL80211_BSS_BEACON_INTERVAL,
+ NL80211_BSS_CAPABILITY,
+ NL80211_BSS_INFORMATION_ELEMENTS,
+ NL80211_BSS_SIGNAL_MBM,
+ NL80211_BSS_SIGNAL_UNSPEC,
+
+ /* keep last */
+ __NL80211_BSS_AFTER_LAST,
+ NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_auth_type - AuthenticationType
+ *
+ * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication
+ * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only)
+ * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
+ * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
+ */
+enum nl80211_auth_type {
+ NL80211_AUTHTYPE_OPEN_SYSTEM,
+ NL80211_AUTHTYPE_SHARED_KEY,
+ NL80211_AUTHTYPE_FT,
+ NL80211_AUTHTYPE_NETWORK_EAP,
+};
+
+/**
+ * enum nl80211_key_type - Key Type
+ * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key
+ * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key
+ * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS)
+ */
+enum nl80211_key_type {
+ NL80211_KEYTYPE_GROUP,
+ NL80211_KEYTYPE_PAIRWISE,
+ NL80211_KEYTYPE_PEERKEY,
+};
+
+/**
+ * enum nl80211_mfp - Management frame protection state
+ * @NL80211_MFP_NO: Management frame protection not used
+ * @NL80211_MFP_REQUIRED: Management frame protection required
+ */
+enum nl80211_mfp {
+ NL80211_MFP_NO,
+ NL80211_MFP_REQUIRED,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
new file mode 100644
index 00000000000..2cda00ccfcc
--- /dev/null
+++ b/include/linux/nl802154.h
@@ -0,0 +1,119 @@
+/*
+ * nl802154.h
+ *
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef NL802154_H
+#define NL802154_H
+
+#define IEEE802154_NL_NAME "802.15.4 MAC"
+#define IEEE802154_MCAST_COORD_NAME "coordinator"
+#define IEEE802154_MCAST_BEACON_NAME "beacon"
+
+enum {
+ __IEEE802154_ATTR_INVALID,
+
+ IEEE802154_ATTR_DEV_NAME,
+ IEEE802154_ATTR_DEV_INDEX,
+
+ IEEE802154_ATTR_STATUS,
+
+ IEEE802154_ATTR_SHORT_ADDR,
+ IEEE802154_ATTR_HW_ADDR,
+ IEEE802154_ATTR_PAN_ID,
+
+ IEEE802154_ATTR_CHANNEL,
+
+ IEEE802154_ATTR_COORD_SHORT_ADDR,
+ IEEE802154_ATTR_COORD_HW_ADDR,
+ IEEE802154_ATTR_COORD_PAN_ID,
+
+ IEEE802154_ATTR_SRC_SHORT_ADDR,
+ IEEE802154_ATTR_SRC_HW_ADDR,
+ IEEE802154_ATTR_SRC_PAN_ID,
+
+ IEEE802154_ATTR_DEST_SHORT_ADDR,
+ IEEE802154_ATTR_DEST_HW_ADDR,
+ IEEE802154_ATTR_DEST_PAN_ID,
+
+ IEEE802154_ATTR_CAPABILITY,
+ IEEE802154_ATTR_REASON,
+ IEEE802154_ATTR_SCAN_TYPE,
+ IEEE802154_ATTR_CHANNELS,
+ IEEE802154_ATTR_DURATION,
+ IEEE802154_ATTR_ED_LIST,
+ IEEE802154_ATTR_BCN_ORD,
+ IEEE802154_ATTR_SF_ORD,
+ IEEE802154_ATTR_PAN_COORD,
+ IEEE802154_ATTR_BAT_EXT,
+ IEEE802154_ATTR_COORD_REALIGN,
+ IEEE802154_ATTR_SEC,
+
+ __IEEE802154_ATTR_MAX,
+};
+
+#define IEEE802154_ATTR_MAX (__IEEE802154_ATTR_MAX - 1)
+
+extern struct nla_policy ieee802154_policy[];
+
+/* commands */
+/* A REQ is answered with a CONF,
+ * and an INDIC with a RESP
+ */
+enum {
+ __IEEE802154_COMMAND_INVALID,
+
+ IEEE802154_ASSOCIATE_REQ,
+ IEEE802154_ASSOCIATE_CONF,
+ IEEE802154_DISASSOCIATE_REQ,
+ IEEE802154_DISASSOCIATE_CONF,
+ IEEE802154_GET_REQ,
+ IEEE802154_GET_CONF,
+ IEEE802154_RESET_REQ,
+ IEEE802154_RESET_CONF,
+ IEEE802154_SCAN_REQ,
+ IEEE802154_SCAN_CONF,
+ IEEE802154_SET_REQ,
+ IEEE802154_SET_CONF,
+ IEEE802154_START_REQ,
+ IEEE802154_START_CONF,
+ IEEE802154_SYNC_REQ,
+ IEEE802154_POLL_REQ,
+ IEEE802154_POLL_CONF,
+
+ IEEE802154_ASSOCIATE_INDIC,
+ IEEE802154_ASSOCIATE_RESP,
+ IEEE802154_DISASSOCIATE_INDIC,
+ IEEE802154_BEACON_NOTIFY_INDIC,
+ IEEE802154_ORPHAN_INDIC,
+ IEEE802154_ORPHAN_RESP,
+ IEEE802154_COMM_STATUS_INDIC,
+ IEEE802154_SYNC_LOSS_INDIC,
+
+ IEEE802154_GTS_REQ, /* Not supported yet */
+ IEEE802154_GTS_INDIC, /* Not supported yet */
+ IEEE802154_GTS_CONF, /* Not supported yet */
+ IEEE802154_RX_ENABLE_REQ, /* Not supported yet */
+ IEEE802154_RX_ENABLE_CONF, /* Not supported yet */
+
+ __IEEE802154_CMD_MAX,
+};
+
+#define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1)
+
+#endif
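The comment above establishes the pairing convention: each REQ is answered by a CONF and each INDIC by a RESP. A purely illustrative helper (not part of this header) that spells out the convention for a few commands:

#include <linux/errno.h>
#include <linux/nl802154.h>

/* Illustrative only: map a request command to the confirmation that is
 * expected to answer it, following the convention noted above. */
static inline int ieee802154_conf_for_req(int cmd)
{
	switch (cmd) {
	case IEEE802154_ASSOCIATE_REQ:
		return IEEE802154_ASSOCIATE_CONF;
	case IEEE802154_DISASSOCIATE_REQ:
		return IEEE802154_DISASSOCIATE_CONF;
	case IEEE802154_SCAN_REQ:
		return IEEE802154_SCAN_CONF;
	case IEEE802154_START_REQ:
		return IEEE802154_START_CONF;
	default:
		return -EINVAL;	/* no confirmation defined here */
	}
}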
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 6a882208301..d47beef08df 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -3,8 +3,23 @@
#include <linux/init.h>
-/* unicode character */
-typedef __u16 wchar_t;
+/* Unicode has changed over the years. Unicode code points no longer
+ * fit into 16 bits; as of Unicode 5 valid code points range from 0
+ * to 0x10ffff (17 planes, where each plane holds 65536 code points).
+ *
+ * The original decision to represent Unicode characters as 16-bit
+ * wchar_t values is now outdated. But plane 0 still includes the
+ * most commonly used characters, so we will retain it. The newer
+ * 32-bit unicode_t type can be used when it is necessary to
+ * represent the full Unicode character set.
+ */
+
+/* Plane-0 Unicode character */
+typedef u16 wchar_t;
+#define MAX_WCHAR_T 0xffff
+
+/* Arbitrary Unicode character */
+typedef u32 unicode_t;
struct nls_table {
const char *charset;
@@ -21,6 +36,13 @@ struct nls_table {
/* this value holds the maximum number of octets per character for the charset */
#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
+/* Byte order for UTF-16 strings */
+enum utf16_endian {
+ UTF16_HOST_ENDIAN,
+ UTF16_LITTLE_ENDIAN,
+ UTF16_BIG_ENDIAN
+};
+
/* nls.c */
extern int register_nls(struct nls_table *);
extern int unregister_nls(struct nls_table *);
@@ -28,10 +50,11 @@ extern struct nls_table *load_nls(char *);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
-extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
-extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
-extern int utf8_wctomb(__u8 *, wchar_t, int);
-extern int utf8_wcstombs(__u8 *, const wchar_t *, int);
+extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
+extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+ enum utf16_endian endian, u8 *s, int maxlen);
static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
{
@@ -58,6 +81,25 @@ static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,
return 0;
}
+/*
+ * nls_nullsize - return length of null character for codepage
+ * @codepage: codepage for which to return the length of the NUL terminator
+ *
+ * Since we can't guarantee that the null terminator will be a particular
+ * length, we have to check against the codepage. If there's a problem
+ * determining it, assume a single-byte NULL terminator.
+ */
+static inline int
+nls_nullsize(const struct nls_table *codepage)
+{
+ int charlen;
+ char tmp[NLS_MAX_CHARSET_SIZE];
+
+ charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE);
+
+ return charlen > 0 ? charlen : 1;
+}
+
#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name))
#endif /* _LINUX_NLS_H */
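A short usage sketch of the new conversion helpers and of nls_nullsize(); the buffer handling and function names are assumptions made only for illustration.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/nls.h>

/* Decode one UTF-8 sequence to a code point and re-encode it, using the
 * helpers declared above. */
static int example_roundtrip(const u8 *in, int inlen, u8 *out, int outlen)
{
	unicode_t cp;

	if (utf8_to_utf32(in, inlen, &cp) < 0)
		return -EINVAL;
	return utf32_to_utf8(cp, out, outlen);
}

/* Terminate a converted string with as many NUL bytes as the codepage
 * needs, as determined by nls_nullsize(). */
static void example_terminate(char *s, int used, const struct nls_table *cp)
{
	memset(s + used, 0, nls_nullsize(cp));
}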
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 848025cd708..829b94b156f 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -408,6 +408,19 @@ static inline int num_node_state(enum node_states state)
#define next_online_node(nid) next_node((nid), node_states[N_ONLINE])
extern int nr_node_ids;
+extern int nr_online_nodes;
+
+static inline void node_set_online(int nid)
+{
+ node_set_state(nid, N_ONLINE);
+ nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+static inline void node_set_offline(int nid)
+{
+ node_clear_state(nid, N_ONLINE);
+ nr_online_nodes = num_node_state(N_ONLINE);
+}
#else
static inline int node_state(int node, enum node_states state)
@@ -434,7 +447,10 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define next_online_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1
+#define nr_online_nodes 1
+#define node_set_online(node) node_set_state((node), N_ONLINE)
+#define node_set_offline(node) node_clear_state((node), N_ONLINE)
#endif
#define node_online_map node_states[N_ONLINE]
@@ -454,9 +470,6 @@ static inline int num_node_state(enum node_states state)
#define node_online(node) node_state((node), N_ONLINE)
#define node_possible(node) node_state((node), N_POSSIBLE)
-#define node_set_online(node) node_set_state((node), N_ONLINE)
-#define node_set_offline(node) node_clear_state((node), N_ONLINE)
-
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
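node_set_online()/node_set_offline() now also refresh the new nr_online_nodes counter, so callers no longer have to recount N_ONLINE nodes themselves. A hedged sketch of a call site (the function is an assumption):

#include <linux/kernel.h>
#include <linux/nodemask.h>

/* Bring a node online; node_set_online() also refreshes nr_online_nodes. */
static void example_online_node(int nid)
{
	if (!node_online(nid))
		node_set_online(nid);

	pr_info("%d node(s) now online\n", nr_online_nodes);
}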
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index b86fa2ffca0..81bc252dc8a 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -198,6 +198,7 @@ static inline int notifier_to_errno(int ret)
#define NETDEV_CHANGENAME 0x000A
#define NETDEV_FEAT_CHANGE 0x000B
#define NETDEV_BONDING_FAILOVER 0x000C
+#define NETDEV_PRE_UP 0x000D
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index afad7dec1b3..7b370c7cfef 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -8,6 +8,7 @@ struct mnt_namespace;
struct uts_namespace;
struct ipc_namespace;
struct pid_namespace;
+struct fs_struct;
/*
* A structure to contain pointers to all per-process
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 7382af37473..e137b3c486a 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -237,6 +237,7 @@ struct nubus_dirent
int mask;
};
+#ifdef __KERNEL__
struct nubus_board {
struct nubus_board* next;
struct nubus_dev* first_dev;
@@ -351,6 +352,7 @@ void nubus_get_rsrc_mem(void* dest,
void nubus_get_rsrc_str(void* dest,
const struct nubus_dirent *dirent,
int maxlen);
+#endif /* __KERNEL__ */
/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
static inline void *nubus_slot_addr(int slot)
diff --git a/include/linux/of.h b/include/linux/of.h
index 6a7efa242f5..7be2d1043c1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -77,6 +77,9 @@ extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern struct device_node *of_parse_phandle(struct device_node *np,
+ const char *phandle_name,
+ int index);
extern int of_parse_phandles_with_args(struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct device_node **out_node, const void **out_args);
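of_parse_phandle() resolves the index-th phandle stored in a property to a device node. A hedged sketch follows; the property name "example-target" is made up for illustration.

#include <linux/of.h>

/* Resolve the first phandle in an "example-target" property; the caller
 * is expected to of_node_put() the result when done with it. */
static struct device_node *example_get_target(struct device_node *np)
{
	return of_parse_phandle(np, "example-target", 0);
}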
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
new file mode 100644
index 00000000000..c9663c69030
--- /dev/null
+++ b/include/linux/of_mdio.h
@@ -0,0 +1,22 @@
+/*
+ * OF helpers for the MDIO (Ethernet PHY) API
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_MDIO_H
+#define __LINUX_OF_MDIO_H
+
+#include <linux/phy.h>
+#include <linux/of.h>
+
+extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
+extern struct phy_device *of_phy_connect(struct net_device *dev,
+ struct device_node *phy_np,
+ void (*hndlr)(struct net_device *),
+ u32 flags, phy_interface_t iface);
+
+#endif /* __LINUX_OF_MDIO_H */
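A hedged sketch of how an Ethernet driver might use of_phy_connect() with a PHY node referenced from its own device-tree node; the adjust-link callback, the "phy-handle" property and the MII interface choice are illustrative assumptions.

#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *ndev)
{
	/* react to link/speed/duplex changes reported by the PHY */
}

static struct phy_device *example_connect_phy(struct net_device *ndev,
					      struct device_node *np)
{
	struct device_node *phy_np;

	/* "phy-handle" is used here purely as an example property name */
	phy_np = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_np)
		return NULL;

	return of_phy_connect(ndev, phy_np, example_adjust_link, 0,
			      PHY_INTERFACE_MODE_MII);
}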
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 3d327b67d7e..90840665133 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -51,6 +51,16 @@ extern int of_register_driver(struct of_platform_driver *drv,
struct bus_type *bus);
extern void of_unregister_driver(struct of_platform_driver *drv);
+/* Platform drivers register/unregister */
+static inline int of_register_platform_driver(struct of_platform_driver *drv)
+{
+ return of_register_driver(drv, &of_platform_bus_type);
+}
+static inline void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+ of_unregister_driver(drv);
+}
+
#include <asm/of_platform.h>
extern struct of_device *of_find_device_by_node(struct device_node *np);
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
new file mode 100644
index 00000000000..b0638fd91e9
--- /dev/null
+++ b/include/linux/page-debug-flags.h
@@ -0,0 +1,30 @@
+#ifndef LINUX_PAGE_DEBUG_FLAGS_H
+#define LINUX_PAGE_DEBUG_FLAGS_H
+
+/*
+ * page->debug_flags bits:
+ *
+ * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. It is used to
+ * implement the generic debug pagealloc feature: pages are filled with
+ * a poison pattern and this flag is set after free_pages(); before
+ * alloc_pages() the pattern is verified to be intact and the flag is
+ * cleared again.
+ */
+
+enum page_debug_flags {
+ PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
+};
+
+/*
+ * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
+ * gets turned off when no debug features are enabling it!
+ */
+
+#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
+#if !defined(CONFIG_PAGE_POISONING) \
+/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
+#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
+#endif
+#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
+
+#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
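A hedged sketch of the bookkeeping described in the comment above; the helper names are invented for illustration, and page->debug_flags is only present when CONFIG_WANT_PAGE_DEBUG_FLAGS is enabled.

#include <linux/bitops.h>
#include <linux/mm_types.h>
#include <linux/page-debug-flags.h>

/* Illustration only: mark and query the poison state of a page through
 * its debug_flags word (CONFIG_WANT_PAGE_DEBUG_FLAGS only). */
static inline void example_set_poison(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}

static inline int example_page_poisoned(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
}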
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 219a523ecdb..e2e5ce54359 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,6 +82,7 @@ enum pageflags {
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
+ PG_private_2, /* If pagecache, has fs aux data */
PG_writeback, /* Page is under writeback */
#ifdef CONFIG_PAGEFLAGS_EXTENDED
PG_head, /* A head page */
@@ -94,8 +95,8 @@ enum pageflags {
PG_reclaim, /* To be reclaimed asap */
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
-#ifdef CONFIG_UNEVICTABLE_LRU
PG_unevictable, /* Page is "unevictable" */
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -106,12 +107,17 @@ enum pageflags {
/* Filesystems */
PG_checked = PG_owner_priv_1,
+ /* Two page bits are conscripted by FS-Cache to maintain local caching
+ * state. These bits are set on pages belonging to the netfs's inodes
+ * when those inodes are being locally cached.
+ */
+ PG_fscache = PG_private_2, /* page backed by cache */
+
/* XEN */
PG_pinned = PG_owner_priv_1,
PG_savepinned = PG_dirty,
/* SLOB */
- PG_slob_page = PG_active,
PG_slob_free = PG_private,
/* SLUB */
@@ -180,7 +186,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
struct page; /* forward declaration */
-TESTPAGEFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
PAGEFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
@@ -192,17 +198,24 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
PAGEFLAG(SavePinned, savepinned); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
-PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
- __SETPAGEFLAG(Private, private)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
-__PAGEFLAG(SlobPage, slob_page)
__PAGEFLAG(SlobFree, slob_free)
__PAGEFLAG(SlubFrozen, slub_frozen)
__PAGEFLAG(SlubDebug, slub_debug)
/*
+ * Private page markings that may be used by the filesystem that owns the page
+ * for its own purposes.
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ */
+PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
+ __CLEARPAGEFLAG(Private, private)
+PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
+PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+
+/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
* risky: they bypass page accounting.
*/
@@ -231,23 +244,17 @@ PAGEFLAG_FALSE(SwapCache)
SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
#define MLOCK_PAGES 1
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked)
-
#else
-
#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked)
SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
-
-PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
- SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
- __CLEARPAGEFLAG_NOOP(Unevictable)
#endif
#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -365,11 +372,9 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define __PG_UNEVICTABLE (1 << PG_unevictable)
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
#define __PG_MLOCKED (1 << PG_mlocked)
#else
-#define __PG_UNEVICTABLE 0
#define __PG_MLOCKED 0
#endif
@@ -378,10 +383,11 @@ static inline void __ClearPageTail(struct page *page)
* these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
- (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
- 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
- 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
- __PG_UNEVICTABLE | __PG_MLOCKED)
+ (1 << PG_lru | 1 << PG_locked | \
+ 1 << PG_private | 1 << PG_private_2 | \
+ 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
+ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
+ 1 << PG_unevictable | __PG_MLOCKED)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -391,4 +397,16 @@ static inline void __ClearPageTail(struct page *page)
#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
#endif /* !__GENERATING_BOUNDS_H */
+
+/**
+ * page_has_private - Determine if page has private stuff
+ * @page: The page to be checked
+ *
+ * Determine if a page has private stuff, indicating that release routines
+ * should be invoked upon it.
+ */
+#define page_has_private(page) \
+ ((page)->flags & ((1 << PG_private) | \
+ (1 << PG_private_2)))
+
#endif /* PAGE_FLAGS_H */
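Since page_has_private() now covers both PG_private and PG_private_2 (PG_fscache), VM code can test a single predicate before calling into the filesystem. A hedged sketch of such a call site (the wrapper function is an assumption):

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Illustration: only call into the filesystem when one of the private
 * bits is set on the page. */
static int example_try_release(struct page *page, gfp_t gfp)
{
	if (!page_has_private(page))
		return 1;	/* nothing for the filesystem to release */

	return try_to_release_page(page, gfp);
}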
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 602cc1fdee9..13f126c89ae 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -18,7 +18,19 @@ struct page_cgroup {
};
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
-void __init page_cgroup_init(void);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+extern void __init page_cgroup_init(void);
+#else
+void __init page_cgroup_init_flatmem(void);
+static inline void __init page_cgroup_init(void)
+{
+}
+#endif
+
struct page_cgroup *lookup_page_cgroup(struct page *page);
enum {
@@ -87,28 +99,31 @@ static inline void page_cgroup_init(void)
{
}
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
#include <linux/swap.h>
-extern struct mem_cgroup *
-swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem);
-extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
+extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
+extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
#else
#include <linux/swap.h>
static inline
-struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
- return NULL;
+ return 0;
}
static inline
-struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
- return NULL;
+ return 0;
}
static inline int
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 01ca0856caf..aec3252afcf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -18,9 +18,12 @@
* Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
* allocation mode flags.
*/
-#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
-#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
-#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */
+enum mapping_flags {
+ AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
+ AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
+ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
+ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
+};
static inline void mapping_set_error(struct address_space *mapping, int error)
{
@@ -32,9 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
}
}
-#ifdef CONFIG_UNEVICTABLE_LRU
-#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
-
static inline void mapping_set_unevictable(struct address_space *mapping)
{
set_bit(AS_UNEVICTABLE, &mapping->flags);
@@ -51,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
return test_bit(AS_UNEVICTABLE, &mapping->flags);
return !!mapping;
}
-#else
-static inline void mapping_set_unevictable(struct address_space *mapping) { }
-static inline void mapping_clear_unevictable(struct address_space *mapping) { }
-static inline int mapping_unevictable(struct address_space *mapping)
-{
- return 0;
-}
-#endif
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
@@ -380,6 +372,11 @@ static inline void wait_on_page_writeback(struct page *page)
extern void end_page_writeback(struct page *page);
/*
+ * Add an arbitrary waiter to a page's wait queue
+ */
+extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
+
+/*
* Fault a userspace page into pagetables. Return non-zero on a fault.
*
* This assumes that two userspace pages are always sufficient. That's
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 7b2886fa7fd..bab82f4c571 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -24,7 +24,6 @@ void __pagevec_release(struct pagevec *pvec);
void __pagevec_free(struct pagevec *pvec);
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
void pagevec_strip(struct pagevec *pvec);
-void pagevec_swap_free(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec,
diff --git a/include/linux/parport.h b/include/linux/parport.h
index e1f83c5065c..38a423ed3c0 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -324,6 +324,10 @@ struct parport {
int spintime;
atomic_t ref_count;
+ unsigned long devflags;
+#define PARPORT_DEVPROC_REGISTERED 0
+ struct pardevice *proc_device; /* Currently registered proc device */
+
struct list_head full_list;
struct parport *slaves[3];
};
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index ea8c6d84996..cc1767f5cca 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -228,10 +228,11 @@ extern void parport_pc_release_resources(struct parport *p);
extern int parport_pc_claim_resources(struct parport *p);
/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */
-extern struct parport *parport_pc_probe_port (unsigned long base,
- unsigned long base_hi,
- int irq, int dma,
- struct device *dev);
-extern void parport_pc_unregister_port (struct parport *p);
+extern struct parport *parport_pc_probe_port(unsigned long base,
+ unsigned long base_hi,
+ int irq, int dma,
+ struct device *dev,
+ int irqflags);
+extern void parport_pc_unregister_port(struct parport *p);
#endif
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 042c166f65d..93a7c08f869 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -10,72 +10,25 @@
#include <linux/acpi.h>
-#define OSC_QUERY_TYPE 0
-#define OSC_SUPPORT_TYPE 1
-#define OSC_CONTROL_TYPE 2
-#define OSC_SUPPORT_MASKS 0x1f
-
-/*
- * _OSC DW0 Definition
- */
-#define OSC_QUERY_ENABLE 1
-#define OSC_REQUEST_ERROR 2
-#define OSC_INVALID_UUID_ERROR 4
-#define OSC_INVALID_REVISION_ERROR 8
-#define OSC_CAPABILITIES_MASK_ERROR 16
-
-/*
- * _OSC DW1 Definition (OS Support Fields)
- */
-#define OSC_EXT_PCI_CONFIG_SUPPORT 1
-#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
-#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
-#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
-#define OSC_MSI_SUPPORT 16
-
-/*
- * _OSC DW1 Definition (OS Control Fields)
- */
-#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
-#define OSC_SHPC_NATIVE_HP_CONTROL 2
-#define OSC_PCI_EXPRESS_PME_CONTROL 4
-#define OSC_PCI_EXPRESS_AER_CONTROL 8
-#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
-
-#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
- OSC_SHPC_NATIVE_HP_CONTROL | \
- OSC_PCI_EXPRESS_PME_CONTROL | \
- OSC_PCI_EXPRESS_AER_CONTROL | \
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
#ifdef CONFIG_ACPI
-extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
-int pci_acpi_osc_support(acpi_handle handle, u32 flags);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
- /* Find root host bridge */
- while (pdev->bus->self)
- pdev = pdev->bus->self;
-
- return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus),
- pdev->bus->number);
+ struct pci_bus *pbus = pdev->bus;
+ /* Find a PCI root bus */
+ while (!pci_is_root_bus(pbus))
+ pbus = pbus->parent;
+ return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
+ pbus->number);
}
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
{
- int seg = pci_domain_nr(pbus), busnr = pbus->number;
- struct pci_dev *bridge = pbus->self;
- if (bridge)
- return DEVICE_ACPI_HANDLE(&(bridge->dev));
- return acpi_get_pci_rootbridge_handle(seg, busnr);
+ if (!pci_is_root_bus(pbus))
+ return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
+ return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
+ pbus->number);
}
#else
-#if !defined(AE_ERROR)
-typedef u32 acpi_status;
-#define AE_ERROR (acpi_status) (0x0001)
-#endif
-static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
-{return AE_ERROR;}
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{ return NULL; }
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7bd624bfdcf..1365c745bdb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -52,6 +52,7 @@
#include <asm/atomic.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/irqreturn.h>
/* Include the ID list */
#include <linux/pci_ids.h>
@@ -93,6 +94,12 @@ enum {
/* #6: expansion ROM resource */
PCI_ROM_RESOURCE,
+ /* device specific resources */
+#ifdef CONFIG_PCI_IOV
+ PCI_IOV_RESOURCES,
+ PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
+#endif
+
/* resources assigned to buses behind the bridge */
#define PCI_BRIDGE_RESOURCE_NUM 4
@@ -117,6 +124,14 @@ typedef int __bitwise pci_power_t;
#define PCI_UNKNOWN ((pci_power_t __force) 5)
#define PCI_POWER_ERROR ((pci_power_t __force) -1)
+/* Remember to update this when the list above changes! */
+extern const char *pci_power_names[];
+
+static inline const char *pci_power_name(pci_power_t state)
+{
+ return pci_power_names[1 + (int) state];
+}
+
#define PCI_PM_D2_DELAY 200
#define PCI_PM_D3_WAIT 10
#define PCI_PM_BUS_WAIT 50
@@ -180,6 +195,7 @@ struct pci_cap_saved_state {
struct pcie_link_state;
struct pci_vpd;
+struct pci_sriov;
/*
* The pci_dev structure is used to describe PCI devices.
@@ -257,6 +273,8 @@ struct pci_dev {
unsigned int is_managed:1;
unsigned int is_pcie:1;
unsigned int state_saved:1;
+ unsigned int is_physfn:1;
+ unsigned int is_virtfn:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -270,6 +288,12 @@ struct pci_dev {
struct list_head msi_list;
#endif
struct pci_vpd *vpd;
+#ifdef CONFIG_PCI_IOV
+ union {
+ struct pci_sriov *sriov; /* SR-IOV capability related */
+ struct pci_dev *physfn; /* the PF this VF is associated with */
+ };
+#endif
};
extern struct pci_dev *alloc_pci_dev(void);
@@ -341,6 +365,15 @@ struct pci_bus {
#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
+/*
+ * Returns true if the PCI bus is a root bus (i.e. sits directly behind
+ * the host-to-PCI bridge), false otherwise.
+ */
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+ return !(pbus->parent);
+}
+
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
@@ -528,7 +561,7 @@ void pcibios_update_irq(struct pci_dev *, int irq);
/* Generic PCI functions used internally */
extern struct pci_bus *pci_find_bus(int domain, int busnr);
-void pci_bus_add_devices(struct pci_bus *bus);
+void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata);
static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
@@ -574,8 +607,6 @@ extern void pci_sort_breadthfirst(void);
struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from);
-struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
- unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
enum pci_lost_interrupt_reason {
@@ -614,6 +645,7 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{
@@ -649,6 +681,11 @@ int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);
+static inline int pci_is_enabled(struct pci_dev *pdev)
+{
+ return (atomic_read(&pdev->enable_cnt) > 0);
+}
+
static inline int pci_is_managed(struct pci_dev *pdev)
{
return pdev->is_managed;
@@ -673,8 +710,8 @@ int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
+int __pci_reset_function(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
-int pci_execute_reset_function(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
@@ -689,11 +726,12 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
int pci_restore_state(struct pci_dev *dev);
+int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
int pci_wake_from_d3(struct pci_dev *dev, bool enable);
pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
@@ -701,6 +739,9 @@ int pci_back_from_sleep(struct pci_dev *dev);
/* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
+#ifdef CONFIG_HOTPLUG
+unsigned int pci_rescan_bus(struct pci_bus *bus);
+#endif
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
@@ -708,7 +749,7 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
int pci_vpd_truncate(struct pci_dev *dev, size_t size);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
-void pci_bus_assign_resources(struct pci_bus *bus);
+void pci_bus_assign_resources(const struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
void pci_assign_unassigned_resources(void);
@@ -756,7 +797,7 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
int pass);
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
int pci_cfg_space_size_ext(struct pci_dev *dev);
int pci_cfg_space_size(struct pci_dev *dev);
@@ -789,7 +830,7 @@ struct msix_entry {
#ifndef CONFIG_PCI_MSI
-static inline int pci_enable_msi(struct pci_dev *dev)
+static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
{
return -1;
}
@@ -799,6 +840,10 @@ static inline void pci_msi_shutdown(struct pci_dev *dev)
static inline void pci_disable_msi(struct pci_dev *dev)
{ }
+static inline int pci_msix_table_size(struct pci_dev *dev)
+{
+ return 0;
+}
static inline int pci_enable_msix(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{
@@ -820,9 +865,10 @@ static inline int pci_msi_enabled(void)
return 0;
}
#else
-extern int pci_enable_msi(struct pci_dev *dev);
+extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
extern void pci_msi_shutdown(struct pci_dev *dev);
extern void pci_disable_msi(struct pci_dev *dev);
+extern int pci_msix_table_size(struct pci_dev *dev);
extern int pci_enable_msix(struct pci_dev *dev,
struct msix_entry *entries, int nvec);
extern void pci_msix_shutdown(struct pci_dev *dev);
@@ -841,6 +887,19 @@ static inline int pcie_aspm_enabled(void)
extern int pcie_aspm_enabled(void);
#endif
+#ifndef CONFIG_PCIE_ECRC
+static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ return;
+}
+static inline void pcie_ecrc_get_policy(char *str) {};
+#else
+extern void pcie_set_ecrc_checking(struct pci_dev *dev);
+extern void pcie_ecrc_get_policy(char *str);
+#endif
+
+#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
+
#ifdef CONFIG_HT_IRQ
/* The functions a driver should call */
int ht_create_irq(struct pci_dev *dev, int idx);
@@ -895,12 +954,6 @@ static inline struct pci_dev *pci_find_device(unsigned int vendor,
return NULL;
}
-static inline struct pci_dev *pci_find_slot(unsigned int bus,
- unsigned int devfn)
-{
- return NULL;
-}
-
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
@@ -1056,6 +1109,10 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
#include <asm/pci.h>
+#ifndef PCIBIOS_MAX_MEM_32
+#define PCIBIOS_MAX_MEM_32 (-1)
+#endif
+
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
@@ -1194,5 +1251,28 @@ int pci_ext_cfg_avail(struct pci_dev *dev);
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
+#ifdef CONFIG_PCI_IOV
+extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
+extern void pci_disable_sriov(struct pci_dev *dev);
+extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
+#else
+static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
+{
+ return -ENODEV;
+}
+static inline void pci_disable_sriov(struct pci_dev *dev)
+{
+}
+static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
+{
+ return IRQ_NONE;
+}
+#endif
+
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
+extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+#endif
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
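The SR-IOV entry points added above are intended for physical-function drivers. A hedged sketch of a PF probe/remove pair; the driver callbacks and the VF count are assumptions.

#include <linux/pci.h>

/* Illustration: a physical-function driver enabling four VFs on probe
 * and tearing them down again on remove. */
static int example_pf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_enable_sriov(pdev, 4);
	if (err)
		dev_warn(&pdev->dev, "SR-IOV enable failed: %d\n", err);

	return 0;
}

static void example_pf_remove(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);
	pci_disable_device(pdev);
}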
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 20998746518..b3646cd7fd5 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -66,17 +66,10 @@ enum pcie_link_speed {
PCIE_LNK_SPEED_UNKNOWN = 0xFF,
};
-struct hotplug_slot;
-struct hotplug_slot_attribute {
- struct attribute attr;
- ssize_t (*show)(struct hotplug_slot *, char *);
- ssize_t (*store)(struct hotplug_slot *, const char *, size_t);
-};
-#define to_hotplug_attr(n) container_of(n, struct hotplug_slot_attribute, attr);
-
/**
* struct hotplug_slot_ops - the callbacks that the hotplug PCI core can use
* @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -109,6 +102,7 @@ struct hotplug_slot_attribute {
*/
struct hotplug_slot_ops {
struct module *owner;
+ const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -167,12 +161,21 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
return pci_slot_name(slot->pci_slot);
}
-extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
- const char *name);
+extern int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus,
+ int nr, const char *name,
+ struct module *owner, const char *mod_name);
extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
+static inline int pci_hp_register(struct hotplug_slot *slot,
+ struct pci_bus *pbus,
+ int devnr, const char *name)
+{
+ return __pci_hp_register(slot, pbus, devnr, name,
+ THIS_MODULE, KBUILD_MODNAME);
+}
+
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
u32 revision;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index aca8c458aa8..a3b00036579 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -104,6 +104,7 @@
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
+#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
@@ -526,6 +527,7 @@
#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
@@ -834,6 +836,8 @@
#define PCI_DEVICE_ID_PROMISE_20276 0x5275
#define PCI_DEVICE_ID_PROMISE_20277 0x7275
+#define PCI_VENDOR_ID_FOXCONN 0x105b
+
#define PCI_VENDOR_ID_UMC 0x1060
#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
@@ -941,6 +945,32 @@
#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
+#define PCI_VENDOR_ID_NI 0x1093
+#define PCI_DEVICE_ID_NI_PCI2322 0xd130
+#define PCI_DEVICE_ID_NI_PCI2324 0xd140
+#define PCI_DEVICE_ID_NI_PCI2328 0xd150
+#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190
+#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0
+#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0
+#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0
+#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0
+#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1
+#define PCI_DEVICE_ID_NI_PCI2322I 0xd250
+#define PCI_DEVICE_ID_NI_PCI2324I 0xd270
+#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0
+#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080
+#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db
+#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd
+#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df
+#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2
+#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4
+#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6
+#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7
+#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8
+#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea
+#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec
+#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee
+
#define PCI_VENDOR_ID_CMD 0x1095
#define PCI_DEVICE_ID_CMD_643 0x0643
#define PCI_DEVICE_ID_CMD_646 0x0646
@@ -976,6 +1006,7 @@
#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
#define PCI_DEVICE_ID_PLX_9030 0x9030
#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9056 0x9056
#define PCI_DEVICE_ID_PLX_9080 0x9080
#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
@@ -1037,8 +1068,6 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
-#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037
-#define PCI_DEVICE_ID_NVIDIA_NVENET_11 0x0038
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041
@@ -1049,21 +1078,16 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055
-#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056
-#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057
#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059
#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065
-#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066
#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069
#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085
-#define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086
#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089
#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a
-#define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091
@@ -1079,15 +1103,12 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1
#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4
#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6
#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9
#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da
-#define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6
#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
@@ -1147,7 +1168,6 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4
#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc
#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1
-#define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3
#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201
@@ -1170,8 +1190,6 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
-#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
-#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280
#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281
#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282
@@ -1218,42 +1236,21 @@
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
-#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
-#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
-#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
-#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
-#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
-#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
-#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
-#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
-#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054C
-#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054D
-#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054E
-#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054F
-#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC
-#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD
-#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE
-#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
-#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
-#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
-#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
-#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
-#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
-#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
-#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
-#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_TT128 0x9128
@@ -1281,6 +1278,13 @@
#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
+#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005
+#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b
+#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043
+#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000
#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
@@ -1373,7 +1377,7 @@
#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
#define PCI_DEVICE_ID_VIA_838X_1 0xB188
#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198
-#define PCI_DEVICE_ID_VIA_C409_IDE 0XC409
+#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409
#define PCI_DEVICE_ID_VIA_ANON 0xFFFF
#define PCI_VENDOR_ID_SIEMENS 0x110A
@@ -1473,6 +1477,7 @@
#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
+#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
@@ -1813,6 +1818,10 @@
#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
+#define PCI_VENDOR_ID_DIGIGRAM 0x1369
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
+
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
@@ -1880,6 +1889,8 @@
#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540
#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550
#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552
+#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553
+#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B
#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560
#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562
#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563
@@ -1962,15 +1973,19 @@
#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
+#define PCI_DEVICE_ID_OXSEMI_C950 0x950B
#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523
+#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001
#define PCI_VENDOR_ID_CHELSIO 0x1425
#define PCI_VENDOR_ID_SAMSUNG 0x144d
+#define PCI_VENDOR_ID_AMBIT 0x1468
+
#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_TITAN 0x14D2
@@ -2077,6 +2092,7 @@
#define PCI_VENDOR_ID_MAINPINE 0x1522
#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100
#define PCI_VENDOR_ID_ENE 0x1524
+#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510
#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750
@@ -2112,6 +2128,8 @@
#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+#define PCI_VENDOR_ID_DFI 0x15bd
+
#define PCI_VENDOR_ID_QUICKNET 0x15e2
#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500
@@ -2215,10 +2233,20 @@
#define PCI_VENDOR_ID_TOPSPIN 0x1867
+#define PCI_VENDOR_ID_SILAN 0x1904
+
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_DEVICE_ID_MPC8315E 0x00b4
+#define PCI_DEVICE_ID_MPC8315 0x00b5
+#define PCI_DEVICE_ID_MPC8314E 0x00b6
+#define PCI_DEVICE_ID_MPC8314 0x00b7
+#define PCI_DEVICE_ID_MPC8378E 0x00c4
+#define PCI_DEVICE_ID_MPC8378 0x00c5
+#define PCI_DEVICE_ID_MPC8377E 0x00c6
+#define PCI_DEVICE_ID_MPC8377 0x00c7
#define PCI_DEVICE_ID_MPC8548E 0x0012
#define PCI_DEVICE_ID_MPC8548 0x0013
#define PCI_DEVICE_ID_MPC8543E 0x0014
@@ -2226,6 +2254,8 @@
#define PCI_DEVICE_ID_MPC8547E 0x0018
#define PCI_DEVICE_ID_MPC8545E 0x0019
#define PCI_DEVICE_ID_MPC8545 0x001a
+#define PCI_DEVICE_ID_MPC8569E 0x0061
+#define PCI_DEVICE_ID_MPC8569 0x0060
#define PCI_DEVICE_ID_MPC8568E 0x0020
#define PCI_DEVICE_ID_MPC8568 0x0021
#define PCI_DEVICE_ID_MPC8567E 0x0022
@@ -2238,6 +2268,8 @@
#define PCI_DEVICE_ID_MPC8572 0x0041
#define PCI_DEVICE_ID_MPC8536E 0x0050
#define PCI_DEVICE_ID_MPC8536 0x0051
+#define PCI_DEVICE_ID_P2020E 0x0070
+#define PCI_DEVICE_ID_P2020 0x0071
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
@@ -2263,6 +2295,10 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
+#define PCI_VENDOR_ID_QMI 0x1a32
+
+#define PCI_VENDOR_ID_AZWAVE 0x1a3b
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2373,6 +2409,7 @@
#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
+#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
@@ -2463,6 +2500,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 027815b4635..83b02f5a25b 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -235,7 +235,7 @@
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
-#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
+#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
@@ -295,8 +295,9 @@
#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
#define PCI_MSIX_FLAGS 2
@@ -304,7 +305,6 @@
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_MASKALL (1 << 14)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
-#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
/* CompactPCI Hotswap Register */
@@ -375,6 +375,8 @@
#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
+#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
+#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
@@ -487,6 +489,8 @@
#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -498,6 +502,7 @@
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_ARI 14
+#define PCI_EXT_CAP_ID_SRIOV 16
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -615,4 +620,35 @@
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
+/* Single Root I/O Virtualization */
+#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
+#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
+#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
+#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
+#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
+#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
+#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
+#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
+#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
+#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
+#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
+#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
+#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
+#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
+#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */
+#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */
+#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */
+#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */
+#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */
+#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */
+#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */
+#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */
+#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset */
+#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */
+#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */
+#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */
+#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
+#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
+#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
+
#endif /* LINUX_PCI_REGS_H */
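The SR-IOV register offsets above are relative to the extended capability located with PCI_EXT_CAP_ID_SRIOV. A hedged sketch of reading TotalVFs from config space:

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Illustration: report how many VFs a device could expose. */
static int example_total_vfs(struct pci_dev *dev)
{
	int pos;
	u16 total;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;	/* no SR-IOV capability */

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	return total;
}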
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 6cd91e3f982..b4c79545330 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -16,29 +16,30 @@
#define PCIE_ANY_PORT 7
/* Service Type */
-#define PCIE_PORT_SERVICE_PME 1 /* Power Management Event */
-#define PCIE_PORT_SERVICE_AER 2 /* Advanced Error Reporting */
-#define PCIE_PORT_SERVICE_HP 4 /* Native Hotplug */
-#define PCIE_PORT_SERVICE_VC 8 /* Virtual Channel */
+#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
+#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
+#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
+#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
+#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
+#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
+#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
+#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
/* Root/Upstream/Downstream Port's Interrupt Mode */
+#define PCIE_PORT_NO_IRQ (-1)
#define PCIE_PORT_INTx_MODE 0
#define PCIE_PORT_MSI_MODE 1
#define PCIE_PORT_MSIX_MODE 2
-struct pcie_port_service_id {
- __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
- __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
- __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
- __u32 port_type, service_type; /* Port Entity */
- kernel_ulong_t driver_data;
+struct pcie_port_data {
+ int port_type; /* Type of the port */
+ int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
};
struct pcie_device {
int irq; /* Service IRQ/MSI/MSI-X Vector */
- int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
- struct pcie_port_service_id id; /* Service ID */
- struct pci_dev *port; /* Root/Upstream/Downstream Port */
+ struct pci_dev *port; /* Root/Upstream/Downstream Port */
+ u32 service; /* Port service this device represents */
void *priv_data; /* Service Private Data */
struct device device; /* Generic Device Interface */
};
@@ -56,10 +57,9 @@ static inline void* get_service_data(struct pcie_device *dev)
struct pcie_port_service_driver {
const char *name;
- int (*probe) (struct pcie_device *dev,
- const struct pcie_port_service_id *id);
+ int (*probe) (struct pcie_device *dev);
void (*remove) (struct pcie_device *dev);
- int (*suspend) (struct pcie_device *dev, pm_message_t state);
+ int (*suspend) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
/* Service Error Recovery Handler */
@@ -68,7 +68,9 @@ struct pcie_port_service_driver {
/* Link Reset Capability - AER service driver specific */
pci_ers_result_t (*reset_link) (struct pci_dev *dev);
- const struct pcie_port_service_id *id_table;
+ int port_type; /* Type of the port this driver can handle */
+ u32 service; /* Port service this device represents */
+
struct device_driver driver;
};
#define to_service_driver(d) \
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
index cb7d10f3076..d4cf7a2ceb3 100644
--- a/include/linux/pda_power.h
+++ b/include/linux/pda_power.h
@@ -31,6 +31,8 @@ struct pda_power_pdata {
unsigned int wait_for_status; /* msecs, default is 500 */
unsigned int wait_for_charger; /* msecs, default is 500 */
unsigned int polling_interval; /* msecs, default is 2000 */
+
+ unsigned long ac_max_uA; /* current to draw when on AC */
};
#endif /* __PDA_POWER_H__ */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
new file mode 100644
index 00000000000..8f921d74f49
--- /dev/null
+++ b/include/linux/percpu-defs.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_PERCPU_DEFS_H
+#define _LINUX_PERCPU_DEFS_H
+
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
+/*
+ * Base implementations of per-CPU variable declarations and definitions, where
+ * the section in which the variable is to be placed is provided by the
+ * 'section' argument. This may be used to affect the parameters governing the
+ * variable's storage.
+ *
+ * NOTE! The sections for the DECLARE and for the DEFINE must match, lest
+ * linkage errors occur due to the compiler generating the wrong code to access
+ * that section.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, section) \
+ extern \
+ __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+ __attribute__((__section__(PER_CPU_BASE_SECTION section))) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+/*
+ * Variant on the per-CPU variable declaration/definition theme used for
+ * ordinary per-CPU variables.
+ */
+#define DECLARE_PER_CPU(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "")
+
+#define DEFINE_PER_CPU(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "")
+
+/*
+ * Declaration/definition used for per-CPU variables that must come first in
+ * the set of variables.
+ */
+#define DECLARE_PER_CPU_FIRST(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+
+/*
+ * Declaration/definition used for per-CPU variables that must be cacheline
+ * aligned under SMP conditions so that, whilst a particular instance of the
+ * data corresponds to a particular CPU, inefficiencies due to direct access by
+ * other CPUs are reduced by preventing the data from unnecessarily spanning
+ * cachelines.
+ *
+ * An example of this would be statistical data, where each CPU's set of data
+ * is updated by that CPU alone, but the data from across all CPUs is collated
+ * by a CPU processing a read from a proc file.
+ */
+#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+ ____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+ ____cacheline_aligned_in_smp
+
+/*
+ * Declaration/definition used for per-CPU variables that must be page aligned.
+ */
+#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+/*
+ * Intermodule exports for per-CPU variables.
+ */
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+
+#endif /* _LINUX_PERCPU_DEFS_H */
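
A minimal sketch of the declare/define pairing described above; the variable name is illustrative. As the NOTE says, the DECLARE in the header and the DEFINE in the .c file must use the same section variant.

/* in a shared header */
DECLARE_PER_CPU(unsigned long, example_count);

/* in exactly one .c file */
DEFINE_PER_CPU(unsigned long, example_count);
EXPORT_PER_CPU_SYMBOL(example_count);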
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873..26fd9d12f05 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,53 +5,22 @@
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
+#include <linux/pfn.h>
#include <asm/percpu.h>
-#ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name) \
- __attribute__((__section__(".data.percpu"))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-
-#ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
-#else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
-#endif
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- __attribute__((__section__(SHARED_ALIGNED_SECTION))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
- ____cacheline_aligned_in_smp
-
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- __attribute__((__section__(".data.percpu.page_aligned"))) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
-#define DEFINE_PER_CPU(type, name) \
- PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- DEFINE_PER_CPU(type, name)
-#endif
-
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
+#define PERCPU_MODULE_RESERVE (8 << 10)
#else
-#define PERCPU_MODULE_RESERVE 0
+#define PERCPU_MODULE_RESERVE 0
#endif
+#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM \
- (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif /* PERCPU_ENOUGH_ROOM */
+ (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+ PERCPU_MODULE_RESERVE)
+#endif
/*
* Must be an lvalue. Since @var must be a simple identifier,
@@ -65,52 +34,151 @@
#ifdef CONFIG_SMP
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32. A more
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size,
+ void *base_addr,
+ pcpu_populate_pte_fn_t populate_pte_fn);
+
+extern ssize_t __init pcpu_embed_first_chunk(
+ size_t static_size, size_t reserved_size,
+ ssize_t dyn_size, ssize_t unit_size);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
struct percpu_data {
void *ptrs[1];
};
+/* pointer disguising messes up the kmemleak object tracking */
+#ifndef CONFIG_DEBUG_KMEMLEAK
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
+#else
+#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
+#endif
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+ struct percpu_data *__p = __percpu_disguise(ptr); \
+ (__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
#else /* CONFIG_SMP */
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
{
- return kzalloc(size, gfp);
+ /*
+ * Can't easily make larger alignment work with kmalloc. WARN
+ * on it. Larger alignment should only be used for module
+ * percpu sections on SMP for which this path isn't used.
+ */
+ WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+ return kzalloc(size, GFP_KERNEL);
}
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
{
- kfree(__pdata);
+ kfree(p);
}
#endif /* CONFIG_SMP */
-#define percpu_alloc_mask(size, gfp, mask) \
- __percpu_alloc_mask((size), (gfp), &(mask))
+#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
+
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long. percpu_read() evaluates to an lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var(). Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var) \
+ ({ \
+ typeof(per_cpu_var(var)) __tmp_var__; \
+ __tmp_var__ = get_cpu_var(var); \
+ put_cpu_var(var); \
+ __tmp_var__; \
+ })
+#endif
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+ get_cpu_var(var) op val; \
+ put_cpu_var(var); \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
+#endif
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
+#ifndef percpu_and
+# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
+#endif
-/* (legacy) interface for use without CPU hotplug handling */
+#ifndef percpu_or
+# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+#endif
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
- cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#ifndef percpu_xor
+# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+#endif
#endif /* __LINUX_PERCPU_H */
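
A short sketch of the dynamic per-cpu allocator and accessor interfaces declared above; the stats structure and update path are illustrative.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct example_stats {
        unsigned long packets;
        unsigned long bytes;
};

static struct example_stats *example_stats;

static int example_init(void)
{
        /* typed allocation, aligned to __alignof__(struct example_stats) */
        example_stats = alloc_percpu(struct example_stats);
        return example_stats ? 0 : -ENOMEM;
}

static void example_account(unsigned long len)
{
        /* pin to this CPU while touching its instance */
        struct example_stats *s = per_cpu_ptr(example_stats, get_cpu());

        s->packets++;
        s->bytes += len;
        put_cpu();
}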
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
new file mode 100644
index 00000000000..89698d8aba5
--- /dev/null
+++ b/include/linux/perf_counter.h
@@ -0,0 +1,728 @@
+/*
+ * Performance counters:
+ *
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_PERF_COUNTER_H
+#define _LINUX_PERF_COUNTER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+
+ PERF_TYPE_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized performance counter event types, used by the
+ * attr.event_id parameter of the sys_perf_counter_open()
+ * syscall:
+ */
+enum perf_hw_id {
+ /*
+ * Common hardware events, generalized by the kernel:
+ */
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+
+ PERF_COUNT_HW_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized hardware cache counters:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
+ */
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+
+ PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+
+ PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+
+ PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
+};
+
+/*
+ * Special "software" counters provided by the kernel, even if the hardware
+ * does not support performance counters. These counters measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+
+ PERF_COUNT_SW_MAX, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_counter_sample_format {
+ PERF_SAMPLE_IP = 1U << 0,
+ PERF_SAMPLE_TID = 1U << 1,
+ PERF_SAMPLE_TIME = 1U << 2,
+ PERF_SAMPLE_ADDR = 1U << 3,
+ PERF_SAMPLE_GROUP = 1U << 4,
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ PERF_SAMPLE_ID = 1U << 6,
+ PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_PERIOD = 1U << 8,
+
+ PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.read_format to request that
+ * reads on the counter should return the indicated quantities,
+ * in increasing order of bit value, after the counter value.
+ */
+enum perf_counter_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+
+ PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
+};
+
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+
+/*
+ * Hardware event to monitor via a performance monitoring counter:
+ */
+struct perf_counter_attr {
+
+ /*
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+
+ /*
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+
+ /*
+ * Type specific configuration information.
+ */
+ __u64 config;
+
+ union {
+ __u64 sample_period;
+ __u64 sample_freq;
+ };
+
+ __u64 sample_type;
+ __u64 read_format;
+
+ __u64 disabled : 1, /* off by default */
+ inherit : 1, /* children inherit it */
+ pinned : 1, /* must always be on PMU */
+ exclusive : 1, /* only group on PMU */
+ exclude_user : 1, /* don't count user */
+ exclude_kernel : 1, /* ditto kernel */
+ exclude_hv : 1, /* ditto hypervisor */
+ exclude_idle : 1, /* don't count when idle */
+ mmap : 1, /* include mmap data */
+ comm : 1, /* include comm data */
+ freq : 1, /* use freq, not period */
+
+ __reserved_1 : 53;
+
+ __u32 wakeup_events; /* wakeup every n events */
+ __u32 __reserved_2;
+
+ __u64 __reserved_3;
+};
+
+/*
+ * Ioctls that can be done on a perf counter fd:
+ */
+#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
+#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
+#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
+#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
+#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
+
+enum perf_counter_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1U << 0,
+};
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_counter_mmap_page {
+ __u32 version; /* version number of this structure */
+ __u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw counters in user-space.
+ *
+ * u32 seq;
+ * s64 count;
+ *
+ * do {
+ * seq = pc->lock;
+ *
+ * barrier()
+ * if (pc->index) {
+ * count = pmc_read(pc->index - 1);
+ * count += pc->offset;
+ * } else
+ * goto regular_read;
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ *
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ __u32 lock; /* seqlock for synchronization */
+ __u32 index; /* hardware counter identifier */
+ __s64 offset; /* add to hardware counter value */
+
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space reading the @data_head value should issue an rmb(), on
+ * SMP capable platforms, after reading this value -- see
+ * perf_counter_wakeup().
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data. In this case
+ * the kernel will not over-write unread data.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+};
+
+#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_EVENT_MISC_KERNEL (1 << 0)
+#define PERF_EVENT_MISC_USER (2 << 0)
+#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
+#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+enum perf_event_type {
+
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * };
+ */
+ PERF_EVENT_MMAP = 1,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * };
+ */
+ PERF_EVENT_LOST = 2,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * };
+ */
+ PERF_EVENT_COMM = 3,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * u64 sample_period;
+ * };
+ */
+ PERF_EVENT_PERIOD = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * };
+ */
+ PERF_EVENT_THROTTLE = 5,
+ PERF_EVENT_UNTHROTTLE = 6,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * };
+ */
+ PERF_EVENT_FORK = 7,
+
+ /*
+ * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
+ * will be PERF_SAMPLE_*
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ *
+ * { u64 nr;
+ * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
+ *
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ * };
+ */
+};
+
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
+
+#ifdef __KERNEL__
+/*
+ * Kernel-internal data types and definitions:
+ */
+
+#ifdef CONFIG_PERF_COUNTERS
+# include <asm/perf_counter.h>
+#endif
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/pid_namespace.h>
+#include <asm/atomic.h>
+
+#define PERF_MAX_STACK_DEPTH 255
+
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[PERF_MAX_STACK_DEPTH];
+};
+
+struct task_struct;
+
+/**
+ * struct hw_perf_counter - performance counter hardware details:
+ */
+struct hw_perf_counter {
+#ifdef CONFIG_PERF_COUNTERS
+ union {
+ struct { /* hardware */
+ u64 config;
+ unsigned long config_base;
+ unsigned long counter_base;
+ int idx;
+ };
+ union { /* software */
+ atomic64_t count;
+ struct hrtimer hrtimer;
+ };
+ };
+ atomic64_t prev_count;
+ u64 sample_period;
+ u64 last_period;
+ atomic64_t period_left;
+ u64 interrupts;
+
+ u64 freq_count;
+ u64 freq_interrupts;
+ u64 freq_stamp;
+#endif
+};
+
+struct perf_counter;
+
+/**
+ * struct pmu - generic performance monitoring unit
+ */
+struct pmu {
+ int (*enable) (struct perf_counter *counter);
+ void (*disable) (struct perf_counter *counter);
+ void (*read) (struct perf_counter *counter);
+ void (*unthrottle) (struct perf_counter *counter);
+};
+
+/**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+ PERF_COUNTER_STATE_ERROR = -2,
+ PERF_COUNTER_STATE_OFF = -1,
+ PERF_COUNTER_STATE_INACTIVE = 0,
+ PERF_COUNTER_STATE_ACTIVE = 1,
+};
+
+struct file;
+
+struct perf_mmap_data {
+ struct rcu_head rcu_head;
+ int nr_pages; /* nr of data pages */
+ int writable; /* are we writable */
+ int nr_locked; /* nr pages mlocked */
+
+ atomic_t poll; /* POLL_ for wakeups */
+ atomic_t events; /* event limit */
+
+ atomic_long_t head; /* write position */
+ atomic_long_t done_head; /* completed head */
+
+ atomic_t lock; /* concurrent writes */
+ atomic_t wakeup; /* needs a wakeup */
+ atomic_t lost; /* nr records lost */
+
+ struct perf_counter_mmap_page *user_page;
+ void *data_pages[0];
+};
+
+struct perf_pending_entry {
+ struct perf_pending_entry *next;
+ void (*func)(struct perf_pending_entry *);
+};
+
+/**
+ * struct perf_counter - performance counter kernel representation:
+ */
+struct perf_counter {
+#ifdef CONFIG_PERF_COUNTERS
+ struct list_head list_entry;
+ struct list_head event_entry;
+ struct list_head sibling_list;
+ int nr_siblings;
+ struct perf_counter *group_leader;
+ const struct pmu *pmu;
+
+ enum perf_counter_active_state state;
+ atomic64_t count;
+
+ /*
+ * These are the total time in nanoseconds that the counter
+ * has been enabled (i.e. eligible to run, and the task has
+ * been scheduled in, if this is a per-task counter)
+ * and running (scheduled onto the CPU), respectively.
+ *
+ * They are computed from tstamp_enabled, tstamp_running and
+ * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
+ */
+ u64 total_time_enabled;
+ u64 total_time_running;
+
+ /*
+ * These are timestamps used for computing total_time_enabled
+ * and total_time_running when the counter is in INACTIVE or
+ * ACTIVE state, measured in nanoseconds from an arbitrary point
+ * in time.
+ * tstamp_enabled: the notional time when the counter was enabled
+ * tstamp_running: the notional time when the counter was scheduled on
+ * tstamp_stopped: in INACTIVE state, the notional time when the
+ * counter was scheduled off.
+ */
+ u64 tstamp_enabled;
+ u64 tstamp_running;
+ u64 tstamp_stopped;
+
+ struct perf_counter_attr attr;
+ struct hw_perf_counter hw;
+
+ struct perf_counter_context *ctx;
+ struct file *filp;
+
+ /*
+ * These accumulate total time (in nanoseconds) that children
+ * counters have been enabled and running, respectively.
+ */
+ atomic64_t child_total_time_enabled;
+ atomic64_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+ */
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_counter *parent;
+
+ int oncpu;
+ int cpu;
+
+ struct list_head owner_entry;
+ struct task_struct *owner;
+
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+ struct perf_mmap_data *data;
+
+ /* poll related */
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+
+ /* delayed work for NMIs and such */
+ int pending_wakeup;
+ int pending_kill;
+ int pending_disable;
+ struct perf_pending_entry pending;
+
+ atomic_t event_limit;
+
+ void (*destroy)(struct perf_counter *);
+ struct rcu_head rcu_head;
+
+ struct pid_namespace *ns;
+ u64 id;
+#endif
+};
+
+/**
+ * struct perf_counter_context - counter context structure
+ *
+ * Used as a container for task counters and CPU counters as well:
+ */
+struct perf_counter_context {
+ /*
+ * Protect the states of the counters in the list,
+ * nr_active, and the list:
+ */
+ spinlock_t lock;
+ /*
+ * Protect the list of counters. Locking either mutex or lock
+ * is sufficient to ensure the list doesn't change; to change
+ * the list you need to lock both the mutex and the spinlock.
+ */
+ struct mutex mutex;
+
+ struct list_head counter_list;
+ struct list_head event_list;
+ int nr_counters;
+ int nr_active;
+ int is_active;
+ atomic_t refcount;
+ struct task_struct *task;
+
+ /*
+ * Context clock, runs when context enabled.
+ */
+ u64 time;
+ u64 timestamp;
+
+ /*
+ * These fields let us detect when two contexts have both
+ * been cloned (inherited) from a common ancestor.
+ */
+ struct perf_counter_context *parent_ctx;
+ u64 parent_gen;
+ u64 generation;
+ int pin_count;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct perf_counter_cpu_context - per cpu counter context structure
+ */
+struct perf_cpu_context {
+ struct perf_counter_context ctx;
+ struct perf_counter_context *task_ctx;
+ int active_oncpu;
+ int max_pertask;
+ int exclusive;
+
+ /*
+ * Recursion avoidance:
+ *
+ * task, softirq, irq, nmi context
+ */
+ int recursion[4];
+};
+
+#ifdef CONFIG_PERF_COUNTERS
+
+/*
+ * Set by architecture code:
+ */
+extern int perf_max_counters;
+
+extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
+
+extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu);
+extern void perf_counter_task_tick(struct task_struct *task, int cpu);
+extern int perf_counter_init_task(struct task_struct *child);
+extern void perf_counter_exit_task(struct task_struct *child);
+extern void perf_counter_free_task(struct task_struct *task);
+extern void set_perf_counter_pending(void);
+extern void perf_counter_do_pending(void);
+extern void perf_counter_print_debug(void);
+extern void __perf_disable(void);
+extern bool __perf_enable(void);
+extern void perf_disable(void);
+extern void perf_enable(void);
+extern int perf_counter_task_disable(void);
+extern int perf_counter_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx, int cpu);
+extern void perf_counter_update_userpage(struct perf_counter *counter);
+
+struct perf_sample_data {
+ struct pt_regs *regs;
+ u64 addr;
+ u64 period;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data);
+
+/*
+ * Return 1 for a software counter, 0 for a hardware counter
+ */
+static inline int is_software_counter(struct perf_counter *counter)
+{
+ return (counter->attr.type != PERF_TYPE_RAW) &&
+ (counter->attr.type != PERF_TYPE_HARDWARE) &&
+ (counter->attr.type != PERF_TYPE_HW_CACHE);
+}
+
+extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+extern void __perf_counter_mmap(struct vm_area_struct *vma);
+
+static inline void perf_counter_mmap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __perf_counter_mmap(vma);
+}
+
+extern void perf_counter_comm(struct task_struct *tsk);
+extern void perf_counter_fork(struct task_struct *tsk);
+
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+
+extern int sysctl_perf_counter_paranoid;
+extern int sysctl_perf_counter_mlock;
+extern int sysctl_perf_counter_sample_rate;
+
+extern void perf_counter_init(void);
+
+#ifndef perf_misc_flags
+#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
+ PERF_EVENT_MISC_KERNEL)
+#define perf_instruction_pointer(regs) instruction_pointer(regs)
+#endif
+
+#else
+static inline void
+perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
+static inline void
+perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu) { }
+static inline void
+perf_counter_task_tick(struct task_struct *task, int cpu) { }
+static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
+static inline void perf_counter_exit_task(struct task_struct *child) { }
+static inline void perf_counter_free_task(struct task_struct *task) { }
+static inline void perf_counter_do_pending(void) { }
+static inline void perf_counter_print_debug(void) { }
+static inline void perf_disable(void) { }
+static inline void perf_enable(void) { }
+static inline int perf_counter_task_disable(void) { return -EINVAL; }
+static inline int perf_counter_task_enable(void) { return -EINVAL; }
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr) { }
+
+static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
+static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_fork(struct task_struct *tsk) { }
+static inline void perf_counter_init(void) { }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_PERF_COUNTER_H */
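
An illustrative user-space initialization of struct perf_counter_attr for a CPU-cycles counter, using only fields defined above; how the attr is then handed to sys_perf_counter_open() is outside this header and not shown.

#include <string.h>
#include <linux/perf_counter.h>

static void example_init_attr(struct perf_counter_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->type           = PERF_TYPE_HARDWARE;
        attr->size           = sizeof(*attr);        /* fwd/bwd compat */
        attr->config         = PERF_COUNT_HW_CPU_CYCLES;
        attr->sample_period  = 100000;               /* one sample per 100k cycles */
        attr->sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
        attr->exclude_kernel = 1;                    /* count user mode only */
        attr->disabled       = 1;                    /* enable later via PERF_COUNTER_IOC_ENABLE */
}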
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index 01b262959f2..228b0b6306b 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -12,187 +12,187 @@
#define PFKEYV2_REVISION 199806L
struct sadb_msg {
- uint8_t sadb_msg_version;
- uint8_t sadb_msg_type;
- uint8_t sadb_msg_errno;
- uint8_t sadb_msg_satype;
- uint16_t sadb_msg_len;
- uint16_t sadb_msg_reserved;
- uint32_t sadb_msg_seq;
- uint32_t sadb_msg_pid;
+ __u8 sadb_msg_version;
+ __u8 sadb_msg_type;
+ __u8 sadb_msg_errno;
+ __u8 sadb_msg_satype;
+ __u16 sadb_msg_len;
+ __u16 sadb_msg_reserved;
+ __u32 sadb_msg_seq;
+ __u32 sadb_msg_pid;
} __attribute__((packed));
/* sizeof(struct sadb_msg) == 16 */
struct sadb_ext {
- uint16_t sadb_ext_len;
- uint16_t sadb_ext_type;
+ __u16 sadb_ext_len;
+ __u16 sadb_ext_type;
} __attribute__((packed));
/* sizeof(struct sadb_ext) == 4 */
struct sadb_sa {
- uint16_t sadb_sa_len;
- uint16_t sadb_sa_exttype;
+ __u16 sadb_sa_len;
+ __u16 sadb_sa_exttype;
__be32 sadb_sa_spi;
- uint8_t sadb_sa_replay;
- uint8_t sadb_sa_state;
- uint8_t sadb_sa_auth;
- uint8_t sadb_sa_encrypt;
- uint32_t sadb_sa_flags;
+ __u8 sadb_sa_replay;
+ __u8 sadb_sa_state;
+ __u8 sadb_sa_auth;
+ __u8 sadb_sa_encrypt;
+ __u32 sadb_sa_flags;
} __attribute__((packed));
/* sizeof(struct sadb_sa) == 16 */
struct sadb_lifetime {
- uint16_t sadb_lifetime_len;
- uint16_t sadb_lifetime_exttype;
- uint32_t sadb_lifetime_allocations;
- uint64_t sadb_lifetime_bytes;
- uint64_t sadb_lifetime_addtime;
- uint64_t sadb_lifetime_usetime;
+ __u16 sadb_lifetime_len;
+ __u16 sadb_lifetime_exttype;
+ __u32 sadb_lifetime_allocations;
+ __u64 sadb_lifetime_bytes;
+ __u64 sadb_lifetime_addtime;
+ __u64 sadb_lifetime_usetime;
} __attribute__((packed));
/* sizeof(struct sadb_lifetime) == 32 */
struct sadb_address {
- uint16_t sadb_address_len;
- uint16_t sadb_address_exttype;
- uint8_t sadb_address_proto;
- uint8_t sadb_address_prefixlen;
- uint16_t sadb_address_reserved;
+ __u16 sadb_address_len;
+ __u16 sadb_address_exttype;
+ __u8 sadb_address_proto;
+ __u8 sadb_address_prefixlen;
+ __u16 sadb_address_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_address) == 8 */
struct sadb_key {
- uint16_t sadb_key_len;
- uint16_t sadb_key_exttype;
- uint16_t sadb_key_bits;
- uint16_t sadb_key_reserved;
+ __u16 sadb_key_len;
+ __u16 sadb_key_exttype;
+ __u16 sadb_key_bits;
+ __u16 sadb_key_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_key) == 8 */
struct sadb_ident {
- uint16_t sadb_ident_len;
- uint16_t sadb_ident_exttype;
- uint16_t sadb_ident_type;
- uint16_t sadb_ident_reserved;
- uint64_t sadb_ident_id;
+ __u16 sadb_ident_len;
+ __u16 sadb_ident_exttype;
+ __u16 sadb_ident_type;
+ __u16 sadb_ident_reserved;
+ __u64 sadb_ident_id;
} __attribute__((packed));
/* sizeof(struct sadb_ident) == 16 */
struct sadb_sens {
- uint16_t sadb_sens_len;
- uint16_t sadb_sens_exttype;
- uint32_t sadb_sens_dpd;
- uint8_t sadb_sens_sens_level;
- uint8_t sadb_sens_sens_len;
- uint8_t sadb_sens_integ_level;
- uint8_t sadb_sens_integ_len;
- uint32_t sadb_sens_reserved;
+ __u16 sadb_sens_len;
+ __u16 sadb_sens_exttype;
+ __u32 sadb_sens_dpd;
+ __u8 sadb_sens_sens_level;
+ __u8 sadb_sens_sens_len;
+ __u8 sadb_sens_integ_level;
+ __u8 sadb_sens_integ_len;
+ __u32 sadb_sens_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_sens) == 16 */
/* followed by:
- uint64_t sadb_sens_bitmap[sens_len];
- uint64_t sadb_integ_bitmap[integ_len]; */
+ __u64 sadb_sens_bitmap[sens_len];
+ __u64 sadb_integ_bitmap[integ_len]; */
struct sadb_prop {
- uint16_t sadb_prop_len;
- uint16_t sadb_prop_exttype;
- uint8_t sadb_prop_replay;
- uint8_t sadb_prop_reserved[3];
+ __u16 sadb_prop_len;
+ __u16 sadb_prop_exttype;
+ __u8 sadb_prop_replay;
+ __u8 sadb_prop_reserved[3];
} __attribute__((packed));
/* sizeof(struct sadb_prop) == 8 */
/* followed by:
struct sadb_comb sadb_combs[(sadb_prop_len +
- sizeof(uint64_t) - sizeof(struct sadb_prop)) /
+ sizeof(__u64) - sizeof(struct sadb_prop)) /
sizeof(struct sadb_comb)]; */
struct sadb_comb {
- uint8_t sadb_comb_auth;
- uint8_t sadb_comb_encrypt;
- uint16_t sadb_comb_flags;
- uint16_t sadb_comb_auth_minbits;
- uint16_t sadb_comb_auth_maxbits;
- uint16_t sadb_comb_encrypt_minbits;
- uint16_t sadb_comb_encrypt_maxbits;
- uint32_t sadb_comb_reserved;
- uint32_t sadb_comb_soft_allocations;
- uint32_t sadb_comb_hard_allocations;
- uint64_t sadb_comb_soft_bytes;
- uint64_t sadb_comb_hard_bytes;
- uint64_t sadb_comb_soft_addtime;
- uint64_t sadb_comb_hard_addtime;
- uint64_t sadb_comb_soft_usetime;
- uint64_t sadb_comb_hard_usetime;
+ __u8 sadb_comb_auth;
+ __u8 sadb_comb_encrypt;
+ __u16 sadb_comb_flags;
+ __u16 sadb_comb_auth_minbits;
+ __u16 sadb_comb_auth_maxbits;
+ __u16 sadb_comb_encrypt_minbits;
+ __u16 sadb_comb_encrypt_maxbits;
+ __u32 sadb_comb_reserved;
+ __u32 sadb_comb_soft_allocations;
+ __u32 sadb_comb_hard_allocations;
+ __u64 sadb_comb_soft_bytes;
+ __u64 sadb_comb_hard_bytes;
+ __u64 sadb_comb_soft_addtime;
+ __u64 sadb_comb_hard_addtime;
+ __u64 sadb_comb_soft_usetime;
+ __u64 sadb_comb_hard_usetime;
} __attribute__((packed));
/* sizeof(struct sadb_comb) == 72 */
struct sadb_supported {
- uint16_t sadb_supported_len;
- uint16_t sadb_supported_exttype;
- uint32_t sadb_supported_reserved;
+ __u16 sadb_supported_len;
+ __u16 sadb_supported_exttype;
+ __u32 sadb_supported_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_supported) == 8 */
/* followed by:
struct sadb_alg sadb_algs[(sadb_supported_len +
- sizeof(uint64_t) - sizeof(struct sadb_supported)) /
+ sizeof(__u64) - sizeof(struct sadb_supported)) /
sizeof(struct sadb_alg)]; */
struct sadb_alg {
- uint8_t sadb_alg_id;
- uint8_t sadb_alg_ivlen;
- uint16_t sadb_alg_minbits;
- uint16_t sadb_alg_maxbits;
- uint16_t sadb_alg_reserved;
+ __u8 sadb_alg_id;
+ __u8 sadb_alg_ivlen;
+ __u16 sadb_alg_minbits;
+ __u16 sadb_alg_maxbits;
+ __u16 sadb_alg_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_alg) == 8 */
struct sadb_spirange {
- uint16_t sadb_spirange_len;
- uint16_t sadb_spirange_exttype;
- uint32_t sadb_spirange_min;
- uint32_t sadb_spirange_max;
- uint32_t sadb_spirange_reserved;
+ __u16 sadb_spirange_len;
+ __u16 sadb_spirange_exttype;
+ __u32 sadb_spirange_min;
+ __u32 sadb_spirange_max;
+ __u32 sadb_spirange_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_spirange) == 16 */
struct sadb_x_kmprivate {
- uint16_t sadb_x_kmprivate_len;
- uint16_t sadb_x_kmprivate_exttype;
- uint32_t sadb_x_kmprivate_reserved;
+ __u16 sadb_x_kmprivate_len;
+ __u16 sadb_x_kmprivate_exttype;
+ __u32 sadb_x_kmprivate_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_x_kmprivate) == 8 */
struct sadb_x_sa2 {
- uint16_t sadb_x_sa2_len;
- uint16_t sadb_x_sa2_exttype;
- uint8_t sadb_x_sa2_mode;
- uint8_t sadb_x_sa2_reserved1;
- uint16_t sadb_x_sa2_reserved2;
- uint32_t sadb_x_sa2_sequence;
- uint32_t sadb_x_sa2_reqid;
+ __u16 sadb_x_sa2_len;
+ __u16 sadb_x_sa2_exttype;
+ __u8 sadb_x_sa2_mode;
+ __u8 sadb_x_sa2_reserved1;
+ __u16 sadb_x_sa2_reserved2;
+ __u32 sadb_x_sa2_sequence;
+ __u32 sadb_x_sa2_reqid;
} __attribute__((packed));
/* sizeof(struct sadb_x_sa2) == 16 */
struct sadb_x_policy {
- uint16_t sadb_x_policy_len;
- uint16_t sadb_x_policy_exttype;
- uint16_t sadb_x_policy_type;
- uint8_t sadb_x_policy_dir;
- uint8_t sadb_x_policy_reserved;
- uint32_t sadb_x_policy_id;
- uint32_t sadb_x_policy_priority;
+ __u16 sadb_x_policy_len;
+ __u16 sadb_x_policy_exttype;
+ __u16 sadb_x_policy_type;
+ __u8 sadb_x_policy_dir;
+ __u8 sadb_x_policy_reserved;
+ __u32 sadb_x_policy_id;
+ __u32 sadb_x_policy_priority;
} __attribute__((packed));
/* sizeof(struct sadb_x_policy) == 16 */
struct sadb_x_ipsecrequest {
- uint16_t sadb_x_ipsecrequest_len;
- uint16_t sadb_x_ipsecrequest_proto;
- uint8_t sadb_x_ipsecrequest_mode;
- uint8_t sadb_x_ipsecrequest_level;
- uint16_t sadb_x_ipsecrequest_reserved1;
- uint32_t sadb_x_ipsecrequest_reqid;
- uint32_t sadb_x_ipsecrequest_reserved2;
+ __u16 sadb_x_ipsecrequest_len;
+ __u16 sadb_x_ipsecrequest_proto;
+ __u8 sadb_x_ipsecrequest_mode;
+ __u8 sadb_x_ipsecrequest_level;
+ __u16 sadb_x_ipsecrequest_reserved1;
+ __u32 sadb_x_ipsecrequest_reqid;
+ __u32 sadb_x_ipsecrequest_reserved2;
} __attribute__((packed));
/* sizeof(struct sadb_x_ipsecrequest) == 16 */
@@ -200,38 +200,38 @@ struct sadb_x_ipsecrequest {
* type of NAT-T is supported, draft-ietf-ipsec-udp-encaps-06
*/
struct sadb_x_nat_t_type {
- uint16_t sadb_x_nat_t_type_len;
- uint16_t sadb_x_nat_t_type_exttype;
- uint8_t sadb_x_nat_t_type_type;
- uint8_t sadb_x_nat_t_type_reserved[3];
+ __u16 sadb_x_nat_t_type_len;
+ __u16 sadb_x_nat_t_type_exttype;
+ __u8 sadb_x_nat_t_type_type;
+ __u8 sadb_x_nat_t_type_reserved[3];
} __attribute__((packed));
/* sizeof(struct sadb_x_nat_t_type) == 8 */
/* Pass a NAT Traversal port (Source or Dest port) */
struct sadb_x_nat_t_port {
- uint16_t sadb_x_nat_t_port_len;
- uint16_t sadb_x_nat_t_port_exttype;
+ __u16 sadb_x_nat_t_port_len;
+ __u16 sadb_x_nat_t_port_exttype;
__be16 sadb_x_nat_t_port_port;
- uint16_t sadb_x_nat_t_port_reserved;
+ __u16 sadb_x_nat_t_port_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_x_nat_t_port) == 8 */
/* Generic LSM security context */
struct sadb_x_sec_ctx {
- uint16_t sadb_x_sec_len;
- uint16_t sadb_x_sec_exttype;
- uint8_t sadb_x_ctx_alg; /* LSMs: e.g., selinux == 1 */
- uint8_t sadb_x_ctx_doi;
- uint16_t sadb_x_ctx_len;
+ __u16 sadb_x_sec_len;
+ __u16 sadb_x_sec_exttype;
+ __u8 sadb_x_ctx_alg; /* LSMs: e.g., selinux == 1 */
+ __u8 sadb_x_ctx_doi;
+ __u16 sadb_x_ctx_len;
} __attribute__((packed));
/* sizeof(struct sadb_sec_ctx) = 8 */
/* Used by MIGRATE to pass addresses IKE will use to perform
* negotiation with the peer */
struct sadb_x_kmaddress {
- uint16_t sadb_x_kmaddress_len;
- uint16_t sadb_x_kmaddress_exttype;
- uint32_t sadb_x_kmaddress_reserved;
+ __u16 sadb_x_kmaddress_len;
+ __u16 sadb_x_kmaddress_exttype;
+ __u32 sadb_x_kmaddress_reserved;
} __attribute__((packed));
/* sizeof(struct sadb_x_kmaddress) == 8 */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index d7e54d98869..b1368b8f657 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -79,7 +79,7 @@ typedef enum {
* Need to be a little smaller than phydev->dev.bus_id to leave room
* for the ":%02x"
*/
-#define MII_BUS_ID_SIZE (BUS_ID_SIZE - 3)
+#define MII_BUS_ID_SIZE (20 - 3)
/*
* The Bus class for PHYs. Devices which provide access to
@@ -315,8 +315,7 @@ struct phy_device {
/* Interrupt and Polling infrastructure */
struct work_struct phy_queue;
- struct work_struct state_queue;
- struct timer_list phy_timer;
+ struct delayed_work state_queue;
atomic_t irq_disable;
struct mutex lock;
@@ -389,6 +388,12 @@ struct phy_driver {
/* Enables or disables interrupts */
int (*config_intr)(struct phy_device *phydev);
+ /*
+ * Checks if the PHY generated an interrupt.
+ * For multi-PHY devices with shared PHY interrupt pin
+ */
+ int (*did_interrupt)(struct phy_device *phydev);
+
/* Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
@@ -402,7 +407,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[BUS_ID_SIZE];
+ char bus_id[20];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
@@ -439,10 +444,16 @@ static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
+int phy_device_register(struct phy_device *phy);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
const char *bus_id, u32 flags, phy_interface_t interface);
+int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
+ void (*handler)(struct net_device *), u32 flags,
+ phy_interface_t interface);
struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
void (*handler)(struct net_device *), u32 flags,
phy_interface_t interface);
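
A sketch of how a PHY driver might provide the new did_interrupt() hook for a shared interrupt pin; the register number, bit mask and IDs are purely hypothetical.

#include <linux/phy.h>

#define EXAMPLE_INTR_STATUS_REG 0x1a    /* hypothetical status register */
#define EXAMPLE_INTR_PENDING    0x0400  /* hypothetical "interrupt pending" bit */

static int example_did_interrupt(struct phy_device *phydev)
{
        int status = phy_read(phydev, EXAMPLE_INTR_STATUS_REG);

        if (status < 0)
                return 0;
        /* report 1 only when this PHY really raised the shared line */
        return (status & EXAMPLE_INTR_PENDING) ? 1 : 0;
}

static struct phy_driver example_phy_driver = {
        .phy_id        = 0x12345678,    /* hypothetical PHY ID */
        .phy_id_mask   = 0xfffffff0,
        .name          = "Example PHY",
        .did_interrupt = example_did_interrupt,
        /* .config_init, .config_aneg, .read_status, ... omitted */
};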
diff --git a/include/linux/pim.h b/include/linux/pim.h
index 1ba0661561a..252bf6644c5 100644
--- a/include/linux/pim.h
+++ b/include/linux/pim.h
@@ -4,14 +4,14 @@
#include <asm/byteorder.h>
/* Message types - V1 */
-#define PIM_V1_VERSION __constant_htonl(0x10000000)
+#define PIM_V1_VERSION cpu_to_be32(0x10000000)
#define PIM_V1_REGISTER 1
/* Message types - V2 */
#define PIM_VERSION 2
#define PIM_REGISTER 1
-#define PIM_NULL_REGISTER __constant_htonl(0x40000000)
+#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */
struct pimreghdr
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 8e4120285f7..b43a9e03905 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -134,6 +134,11 @@ struct pipe_buf_operations {
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
+/* Pipe lock and unlock operations */
+void pipe_lock(struct pipe_inode_info *);
+void pipe_unlock(struct pipe_inode_info *);
+void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
+
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
@@ -147,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
#endif
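
A sketch of taking both pipe mutexes with the helpers declared above, as a splice-style operation moving buffers between two pipes might; the function body is illustrative.

#include <linux/pipe_fs_i.h>

static void example_transfer(struct pipe_inode_info *in,
                             struct pipe_inode_info *out)
{
        /* acquires both mutexes in a deadlock-safe order */
        pipe_double_lock(in, out);

        /* ... move pipe_buffers from 'in' to 'out' here ... */

        pipe_unlock(in);
        pipe_unlock(out);
}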
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 04b4d7330e6..d745f5b6c7b 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -113,6 +113,7 @@ struct pkt_ctrl_command {
#include <linux/cdrom.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include <linux/mempool.h>
/* default bio write queue congestion marks */
#define PKT_WRITE_CONGESTION_ON 10000
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 9a342699c60..8dc5123b630 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -12,6 +12,7 @@
#define _PLATFORM_DEVICE_H_
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
struct platform_device {
const char * name;
@@ -19,8 +20,12 @@ struct platform_device {
struct device dev;
u32 num_resources;
struct resource * resource;
+
+ struct platform_device_id *id_entry;
};
+#define platform_get_device_id(pdev) ((pdev)->id_entry)
+
#define to_platform_device(x) container_of((x), struct platform_device, dev)
extern int platform_device_register(struct platform_device *);
@@ -31,8 +36,8 @@ extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int);
extern int platform_get_irq(struct platform_device *, unsigned int);
-extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, char *);
-extern int platform_get_irq_byname(struct platform_device *, char *);
+extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *);
+extern int platform_get_irq_byname(struct platform_device *, const char *);
extern int platform_add_devices(struct platform_device **, int);
extern struct platform_device *platform_device_register_simple(const char *, int id,
@@ -56,6 +61,7 @@ struct platform_driver {
int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *);
struct device_driver driver;
+ struct platform_device_id *id_table;
};
extern int platform_driver_register(struct platform_driver *);
@@ -70,4 +76,46 @@ extern int platform_driver_probe(struct platform_driver *driver,
#define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
#define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data))
+/* early platform driver interface */
+struct early_platform_driver {
+ const char *class_str;
+ struct platform_driver *pdrv;
+ struct list_head list;
+ int requested_id;
+};
+
+#define EARLY_PLATFORM_ID_UNSET -2
+#define EARLY_PLATFORM_ID_ERROR -3
+
+extern int early_platform_driver_register(struct early_platform_driver *epdrv,
+ char *buf);
+extern void early_platform_add_devices(struct platform_device **devs, int num);
+
+static inline int is_early_platform_device(struct platform_device *pdev)
+{
+ return !pdev->dev.driver;
+}
+
+extern void early_platform_driver_register_all(char *class_str);
+extern int early_platform_driver_probe(char *class_str,
+ int nr_probe, int user_only);
+extern void early_platform_cleanup(void);
+
+
+#ifndef MODULE
+#define early_platform_init(class_string, platform_driver) \
+static __initdata struct early_platform_driver early_driver = { \
+ .class_str = class_string, \
+ .pdrv = platform_driver, \
+ .requested_id = EARLY_PLATFORM_ID_UNSET, \
+}; \
+static int __init early_platform_driver_setup_func(char *buf) \
+{ \
+ return early_platform_driver_register(&early_driver, buf); \
+} \
+early_param(class_string, early_platform_driver_setup_func)
+#else /* MODULE */
+#define early_platform_init(class_string, platform_driver)
+#endif /* MODULE */
+
#endif /* _PLATFORM_DEVICE_H_ */
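
A minimal sketch of hooking a driver into the new early platform interface; the class string "earlytimer" and the driver itself are illustrative.

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /* runs from early_platform_driver_probe(), before normal driver init */
        return 0;
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .driver = {
                .name = "example",
        },
};

early_platform_init("earlytimer", &example_driver);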
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 85de2f05587..45926d77d6a 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -96,6 +96,10 @@ struct plist_node {
# define PLIST_HEAD_LOCK_INIT(_lock)
#endif
+#define _PLIST_HEAD_INIT(head) \
+ .prio_list = LIST_HEAD_INIT((head).prio_list), \
+ .node_list = LIST_HEAD_INIT((head).node_list)
+
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
@@ -103,8 +107,7 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- .prio_list = LIST_HEAD_INIT((head).prio_list), \
- .node_list = LIST_HEAD_INIT((head).node_list), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
@@ -116,7 +119,7 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = PLIST_HEAD_INIT((node).plist, NULL), \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
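
A static-initialization sketch using the initializers above; names are illustrative. PLIST_HEAD_INIT still takes the lock guarding the list, while PLIST_NODE_INIT no longer needs one thanks to _PLIST_HEAD_INIT.

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static struct plist_head example_head = PLIST_HEAD_INIT(example_head, example_lock);
static struct plist_node example_node = PLIST_NODE_INIT(example_node, 10);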
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 24ba5f67b3a..b3f74764a58 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -382,14 +382,13 @@ struct dev_pm_info {
#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern int sysdev_resume(void);
-extern void device_power_up(pm_message_t state);
-extern void device_resume(pm_message_t state);
+extern void dpm_resume_noirq(pm_message_t state);
+extern void dpm_resume_end(pm_message_t state);
extern void device_pm_unlock(void);
extern int sysdev_suspend(pm_message_t state);
-extern int device_power_down(pm_message_t state);
-extern int device_suspend(pm_message_t state);
-extern int device_prepare_suspend(pm_message_t state);
+extern int dpm_suspend_noirq(pm_message_t state);
+extern int dpm_suspend_start(pm_message_t state);
extern void __suspend_report_result(const char *function, void *fn, int ret);
@@ -400,7 +399,10 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
#else /* !CONFIG_PM_SLEEP */
-static inline int device_suspend(pm_message_t state)
+#define device_pm_lock() do {} while (0)
+#define device_pm_unlock() do {} while (0)
+
+static inline int dpm_suspend_start(pm_message_t state)
{
return 0;
}
@@ -409,6 +411,14 @@ static inline int device_suspend(pm_message_t state)
#endif /* !CONFIG_PM_SLEEP */
+/* How to reorder dpm_list after device_move() */
+enum dpm_order {
+ DPM_ORDER_NONE,
+ DPM_ORDER_DEV_AFTER_PARENT,
+ DPM_ORDER_PARENT_BEFORE_DEV,
+ DPM_ORDER_DEV_LAST,
+};
+
/*
* Global Power Management flags
* Used to keep APM and ACPI from both being active
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index ca3c8877302..b063c7328ba 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -446,6 +446,7 @@ int pnp_start_dev(struct pnp_dev *dev);
int pnp_stop_dev(struct pnp_dev *dev);
int pnp_activate_dev(struct pnp_dev *dev);
int pnp_disable_dev(struct pnp_dev *dev);
+int pnp_range_reserved(resource_size_t start, resource_size_t end);
/* protocol helpers */
int pnp_is_active(struct pnp_dev *dev);
@@ -476,6 +477,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0; }
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 9f31683728f..6729f7dcd60 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -17,6 +17,9 @@
*/
#define TIMER_ENTRY_STATIC ((void *) 0x74737461)
+/********** mm/debug-pagealloc.c **********/
+#define PAGE_POISON 0xaa
+
/********** mm/slab.c **********/
/*
* Magic nums for obj red zoning.
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 8c24ef8d997..fa287f25138 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -32,6 +32,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
typedef struct poll_table_struct {
poll_queue_proc qproc;
+ unsigned long key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -43,10 +44,12 @@ static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_addres
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->qproc = qproc;
+ pt->key = ~0UL; /* all events enabled */
}
struct poll_table_entry {
struct file *filp;
+ unsigned long key;
wait_queue_t wait;
wait_queue_head_t *wait_address;
};
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8ff25e0e7f7..594c494ac3f 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -73,6 +73,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index a942892d6df..0d3fa63e90e 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -22,6 +22,7 @@
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/poll.h>
+#include <net/net_namespace.h>
struct ppp_channel;
@@ -39,8 +40,8 @@ struct ppp_channel {
int mtu; /* max transmit packet size */
int hdrlen; /* amount of headroom channel needs */
void *ppp; /* opaque to channel */
- /* the following are not used at present */
int speed; /* transfer rate (bytes/second) */
+ /* the following is not used at present */
int latency; /* overhead time in milliseconds */
};
@@ -56,6 +57,9 @@ extern void ppp_input(struct ppp_channel *, struct sk_buff *);
that we may have missed a packet. */
extern void ppp_input_error(struct ppp_channel *, int code);
+/* Attach a channel to a given PPP unit in specified net. */
+extern int ppp_register_net_channel(struct net *, struct ppp_channel *);
+
/* Attach a channel to a given PPP unit. */
extern int ppp_register_channel(struct ppp_channel *);
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index 1c866bda201..0f93ed6b4a8 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -177,8 +177,8 @@ struct ppp_comp_stats {
* the last NP packet was sent or received.
*/
struct ppp_idle {
- time_t xmit_idle; /* time since last NP packet sent */
- time_t recv_idle; /* time since last NP packet received */
+ __kernel_time_t xmit_idle; /* time since last NP packet sent */
+ __kernel_time_t recv_idle; /* time since last NP packet received */
};
#endif /* _PPP_DEFS_H_ */
diff --git a/include/linux/pps.h b/include/linux/pps.h
new file mode 100644
index 00000000000..cfe5c7214ec
--- /dev/null
+++ b/include/linux/pps.h
@@ -0,0 +1,122 @@
+/*
+ * PPS API header
+ *
+ * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#ifndef _PPS_H_
+#define _PPS_H_
+
+#define PPS_VERSION "5.3.6"
+#define PPS_MAX_SOURCES 16 /* should be enough... */
+
+/* Implementation note: the logical states ``assert'' and ``clear''
+ * are implemented in terms of the chip register, i.e. ``assert''
+ * means the bit is set. */
+
+/*
+ * 3.2 New data structures
+ */
+
+#define PPS_API_VERS_1 1
+#define PPS_API_VERS PPS_API_VERS_1 /* we use API version 1 */
+#define PPS_MAX_NAME_LEN 32
+
+/* 32-bit vs. 64-bit compatibility.
+ *
+ * On i386, the alignment of a uint64_t is only 4 bytes, while on most other
+ * architectures it's 8 bytes. On i386, there will be no padding between the
+ * two consecutive 'struct pps_ktime' members of struct pps_kinfo and struct
+ * pps_kparams. But on most platforms there will be padding to ensure correct
+ * alignment.
+ *
+ * The simple fix is probably to add an explicit padding.
+ * [David Woodhouse]
+ */
+struct pps_ktime {
+ __s64 sec;
+ __s32 nsec;
+ __u32 flags;
+};
+#define PPS_TIME_INVALID (1<<0) /* used to specify timeout==NULL */
+
+struct pps_kinfo {
+ __u32 assert_sequence; /* seq. num. of assert event */
+ __u32 clear_sequence; /* seq. num. of clear event */
+ struct pps_ktime assert_tu; /* time of assert event */
+ struct pps_ktime clear_tu; /* time of clear event */
+ int current_mode; /* current mode bits */
+};
+
+struct pps_kparams {
+ int api_version; /* API version # */
+ int mode; /* mode bits */
+ struct pps_ktime assert_off_tu; /* offset compensation for assert */
+ struct pps_ktime clear_off_tu; /* offset compensation for clear */
+};
+
+/*
+ * 3.3 Mode bit definitions
+ */
+
+/* Device/implementation parameters */
+#define PPS_CAPTUREASSERT 0x01 /* capture assert events */
+#define PPS_CAPTURECLEAR 0x02 /* capture clear events */
+#define PPS_CAPTUREBOTH 0x03 /* capture assert and clear events */
+
+#define PPS_OFFSETASSERT 0x10 /* apply compensation for assert ev. */
+#define PPS_OFFSETCLEAR 0x20 /* apply compensation for clear ev. */
+
+#define PPS_CANWAIT 0x100 /* can we wait for an event? */
+#define PPS_CANPOLL 0x200 /* bit reserved for future use */
+
+/* Kernel actions */
+#define PPS_ECHOASSERT 0x40 /* feed back assert event to output */
+#define PPS_ECHOCLEAR 0x80 /* feed back clear event to output */
+
+/* Timestamp formats */
+#define PPS_TSFMT_TSPEC 0x1000 /* select timespec format */
+#define PPS_TSFMT_NTPFP 0x2000 /* select NTP format */
+
+/*
+ * 3.4.4 New functions: disciplining the kernel timebase
+ */
+
+/* Kernel consumers */
+#define PPS_KC_HARDPPS 0 /* hardpps() (or equivalent) */
+#define PPS_KC_HARDPPS_PLL 1 /* hardpps() constrained to
+ use a phase-locked loop */
+#define PPS_KC_HARDPPS_FLL 2 /* hardpps() constrained to
+ use a frequency-locked loop */
+/*
+ * Here begins the implementation-specific part!
+ */
+
+struct pps_fdata {
+ struct pps_kinfo info;
+ struct pps_ktime timeout;
+};
+
+#include <linux/ioctl.h>
+
+#define PPS_GETPARAMS _IOR('p', 0xa1, struct pps_kparams *)
+#define PPS_SETPARAMS _IOW('p', 0xa2, struct pps_kparams *)
+#define PPS_GETCAP _IOR('p', 0xa3, int *)
+#define PPS_FETCH _IOWR('p', 0xa4, struct pps_fdata *)
+
+#endif /* _PPS_H_ */
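
The four ioctls above are the whole userspace surface of this header. Below is a minimal consumer sketch built only from the definitions shown; the /dev/pps0 path, the blocking-fetch behaviour of PPS_TIME_INVALID and the error handling are illustrative assumptions, not guarantees from the header itself.

/* Hedged sketch of a userspace PPS consumer (assumes /dev/pps0 exists and
 * that the driver supports PPS_CAPTUREASSERT). */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/pps.h>

int main(void)
{
        struct pps_kparams params;
        struct pps_fdata fdata;
        int fd = open("/dev/pps0", O_RDONLY);

        if (fd < 0)
                return 1;

        /* Read current parameters, then ask for assert timestamps. */
        if (ioctl(fd, PPS_GETPARAMS, &params) < 0)
                return 1;
        params.mode |= PPS_CAPTUREASSERT;
        if (ioctl(fd, PPS_SETPARAMS, &params) < 0)
                return 1;

        /* Fetch one event; PPS_TIME_INVALID marks the timeout as unused. */
        memset(&fdata, 0, sizeof(fdata));
        fdata.timeout.flags = PPS_TIME_INVALID;
        if (ioctl(fd, PPS_FETCH, &fdata) < 0)
                return 1;

        printf("assert #%u at %lld.%09d\n",
               fdata.info.assert_sequence,
               (long long)fdata.info.assert_tu.sec,
               fdata.info.assert_tu.nsec);
        close(fd);
        return 0;
}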
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
new file mode 100644
index 00000000000..e0a193f830e
--- /dev/null
+++ b/include/linux/pps_kernel.h
@@ -0,0 +1,89 @@
+/*
+ * PPS API kernel header
+ *
+ * Copyright (C) 2009 Rodolfo Giometti <giometti@linux.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pps.h>
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/time.h>
+
+/*
+ * Global defines
+ */
+
+/* The specific PPS source info */
+struct pps_source_info {
+ char name[PPS_MAX_NAME_LEN]; /* symbolic name */
+ char path[PPS_MAX_NAME_LEN]; /* path of connected device */
+ int mode; /* PPS's allowed mode */
+
+ void (*echo)(int source, int event, void *data); /* PPS echo function */
+
+ struct module *owner;
+ struct device *dev;
+};
+
+/* The main struct */
+struct pps_device {
+ struct pps_source_info info; /* PPS source info */
+
+ struct pps_kparams params; /* PPS's current params */
+
+ __u32 assert_sequence; /* PPS' assert event seq # */
+ __u32 clear_sequence; /* PPS' clear event seq # */
+ struct pps_ktime assert_tu;
+ struct pps_ktime clear_tu;
+ int current_mode; /* PPS mode at event time */
+
+ int go; /* has a PPS event arrived? */
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS source unique ID */
+ struct cdev cdev;
+ struct device *dev;
+ int devno;
+ struct fasync_struct *async_queue; /* fasync method */
+ spinlock_t lock;
+
+ atomic_t usage; /* usage count */
+};
+
+/*
+ * Global variables
+ */
+
+extern spinlock_t pps_idr_lock;
+extern struct idr pps_idr;
+extern struct timespec pps_irq_ts[];
+
+extern struct device_attribute pps_attrs[];
+
+/*
+ * Exported functions
+ */
+
+struct pps_device *pps_get_source(int source);
+extern void pps_put_source(struct pps_device *pps);
+extern int pps_register_source(struct pps_source_info *info,
+ int default_params);
+extern void pps_unregister_source(int source);
+extern int pps_register_cdev(struct pps_device *pps);
+extern void pps_unregister_cdev(struct pps_device *pps);
+extern void pps_event(int source, struct pps_ktime *ts, int event, void *data);
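
Reading the declarations above, a driver describes itself with a struct pps_source_info, registers it, and then feeds pulse edges in with pps_event(). The sketch below assumes the value returned by pps_register_source() is the source id later passed to pps_event() and pps_unregister_source(); the device, its interrupt wiring and current_kernel_time() as the timestamp source are illustrative choices, not requirements of this header.

/* Hedged sketch of a PPS client module; "my-pps" and my_pps_irq() are
 * hypothetical, and request_irq() wiring is omitted. */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/pps_kernel.h>

static int my_pps_source;       /* id returned by pps_register_source() */

static struct pps_source_info my_pps_info = {
        .name   = "my-pps",
        .path   = "",           /* no associated device node */
        .mode   = PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
                  PPS_CANWAIT | PPS_TSFMT_TSPEC,
        .owner  = THIS_MODULE,
};

/* Would be called from the (hypothetical) device's interrupt handler. */
static irqreturn_t my_pps_irq(int irq, void *data)
{
        struct pps_ktime ts;
        struct timespec now = current_kernel_time();

        ts.sec = now.tv_sec;
        ts.nsec = now.tv_nsec;
        ts.flags = 0;
        pps_event(my_pps_source, &ts, PPS_CAPTUREASSERT, NULL);
        return IRQ_HANDLED;
}

static int __init my_pps_init(void)
{
        my_pps_source = pps_register_source(&my_pps_info,
                                PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
        return my_pps_source < 0 ? my_pps_source : 0;
}

static void __exit my_pps_exit(void)
{
        pps_unregister_source(my_pps_source);
}

module_init(my_pps_init);
module_exit(my_pps_exit);
MODULE_LICENSE("GPL");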
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 48d887e3c6e..b00df4c79c6 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -85,4 +85,7 @@
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
+#define PR_TASK_PERF_COUNTERS_DISABLE 31
+#define PR_TASK_PERF_COUNTERS_ENABLE 32
+
#endif /* _LINUX_PRCTL_H */
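
The two new options follow the usual prctl() calling convention. A short userspace sketch under that assumption (the unused arguments being zero mirrors other prctl options; the surrounding measurement logic is left out):

/* Hedged sketch: suppress per-task performance counters around a section. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0) < 0)
                perror("PR_TASK_PERF_COUNTERS_DISABLE");

        /* ... work that should not be counted ... */

        if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0) < 0)
                perror("PR_TASK_PERF_COUNTERS_ENABLE");
        return 0;
}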
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index b8bdb96eff7..e6e77d31c41 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -41,9 +41,6 @@ enum {
* while parent/subdir create the directory structure (every
* /proc file has a parent, but "subdir" is NULL for all
* non-directory entries).
- *
- * "owner" is used to protect module
- * from unloading while proc_dir_entry is in use
*/
typedef int (read_proc_t)(char *page, char **start, off_t off,
@@ -70,7 +67,6 @@ struct proc_dir_entry {
* somewhere.
*/
const struct file_operations *proc_fops;
- struct module *owner;
struct proc_dir_entry *next, *parent, *subdir;
void *data;
read_proc_t *read_proc;
@@ -97,20 +93,9 @@ struct vmcore {
#ifdef CONFIG_PROC_FS
-extern spinlock_t proc_subdir_lock;
-
extern void proc_root_init(void);
void proc_flush_task(struct task_struct *task);
-struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
-int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
-unsigned long task_vsize(struct mm_struct *);
-int task_statm(struct mm_struct *, int *, int *, int *, int *);
-void task_mem(struct seq_file *, struct mm_struct *);
-void clear_refs_smap(struct mm_struct *mm);
-
-struct proc_dir_entry *de_get(struct proc_dir_entry *de);
-void de_put(struct proc_dir_entry *de);
extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent);
@@ -120,20 +105,7 @@ struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
void *data);
extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
-extern struct vfsmount *proc_mnt;
struct pid_namespace;
-extern int proc_fill_super(struct super_block *);
-extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *);
-
-/*
- * These are generic /proc routines that use the internal
- * "struct proc_dir_entry" tree to traverse the filesystem.
- *
- * The /proc root directory has extended versions to take care
- * of the /proc/<pid> subdirectories.
- */
-extern int proc_readdir(struct file *, void *, filldir_t);
-extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 98b93ca4db0..7456d7d87a1 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -81,7 +81,6 @@
extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
-extern struct task_struct *ptrace_get_task_struct(pid_t pid);
extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
@@ -94,7 +93,7 @@ extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
-extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
+extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ 1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
@@ -326,15 +325,6 @@ static inline void user_enable_block_step(struct task_struct *task)
#define arch_ptrace_untrace(task) do { } while (0)
#endif
-#ifndef arch_ptrace_fork
-/*
- * Do machine-specific work to initialize a new task.
- *
- * This is called from copy_process().
- */
-#define arch_ptrace_fork(child, clone_flags) do { } while (0)
-#endif
-
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 3945f803d51..7c775751392 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm);
*/
void pwm_disable(struct pwm_device *pwm);
-#endif /* __ASM_ARCH_PWM_H */
+#endif /* __LINUX_PWM_H */
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index 787d19ea9f4..8b9aee1a9ce 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -85,65 +85,4 @@ struct qnx4_super_block {
struct qnx4_inode_entry AltBoot;
};
-#ifdef __KERNEL__
-
-#define QNX4_DEBUG 0
-
-#if QNX4_DEBUG
-#define QNX4DEBUG(X) printk X
-#else
-#define QNX4DEBUG(X) (void) 0
-#endif
-
-struct qnx4_sb_info {
- struct buffer_head *sb_buf; /* superblock buffer */
- struct qnx4_super_block *sb; /* our superblock */
- unsigned int Version; /* may be useful */
- struct qnx4_inode_entry *BitMap; /* useful */
-};
-
-struct qnx4_inode_info {
- struct qnx4_inode_entry raw;
- loff_t mmu_private;
- struct inode vfs_inode;
-};
-
-extern struct inode *qnx4_iget(struct super_block *, unsigned long);
-extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);
-extern unsigned long qnx4_count_free_blocks(struct super_block *sb);
-extern unsigned long qnx4_block_map(struct inode *inode, long iblock);
-
-extern struct buffer_head *qnx4_bread(struct inode *, int, int);
-
-extern const struct inode_operations qnx4_file_inode_operations;
-extern const struct inode_operations qnx4_dir_inode_operations;
-extern const struct file_operations qnx4_file_operations;
-extern const struct file_operations qnx4_dir_operations;
-extern int qnx4_is_free(struct super_block *sb, long block);
-extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy);
-extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd);
-extern void qnx4_truncate(struct inode *inode);
-extern void qnx4_free_inode(struct inode *inode);
-extern int qnx4_unlink(struct inode *dir, struct dentry *dentry);
-extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
-extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int);
-extern int qnx4_sync_inode(struct inode *inode);
-
-static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-static inline struct qnx4_inode_info *qnx4_i(struct inode *inode)
-{
- return container_of(inode, struct qnx4_inode_info, vfs_inode);
-}
-
-static inline struct qnx4_inode_entry *qnx4_raw_inode(struct inode *inode)
-{
- return &qnx4_i(inode)->raw;
-}
-
-#endif /* __KERNEL__ */
-
#endif
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d72d5d84fde..78c48895b12 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -198,6 +198,7 @@ struct mem_dqblk {
qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
qsize_t dqb_curspace; /* current used space */
+ qsize_t dqb_rsvspace; /* current reserved space for delalloc */
qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
qsize_t dqb_isoftlimit; /* preferred inode limit */
qsize_t dqb_curinodes; /* current # allocated inodes */
@@ -276,8 +277,6 @@ struct dquot {
struct mem_dqblk dq_dqb; /* Diskquota usage */
};
-#define NODQUOT (struct dquot *)NULL
-
#define QUOTA_OK 0
#define NO_QUOTA 1
@@ -308,6 +307,14 @@ struct dquot_operations {
int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
+ /* reserve quota for delayed block allocation */
+ int (*reserve_space) (struct inode *, qsize_t, int);
+ /* claim reserved quota for delayed alloc */
+ int (*claim_space) (struct inode *, qsize_t);
+ /* release reserved quota for delayed alloc */
+ void (*release_rsv) (struct inode *, qsize_t);
+ /* get reserved quota for delayed alloc */
+ qsize_t (*get_reserved_space) (struct inode *);
};
/* Operations handling requests from userspace */
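
A filesystem doing delayed allocation is expected to plug the four new callbacks into its dquot_operations. A sketch of that wiring, using the generic dquot_* helpers declared in quotaops.h below, is given here; "myfs" is hypothetical, myfs_get_reserved_space() stands in for a filesystem hook reporting the inode's in-memory reservation, and the pre-existing callbacks are elided.

/* Hedged sketch only: not a complete dquot_operations instance. */
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

static qsize_t myfs_get_reserved_space(struct inode *inode)
{
        return 0;       /* would return this inode's reserved byte count */
}

static struct dquot_operations myfs_quota_operations = {
        /* ... existing callbacks (initialize, drop, alloc_space,
         *     alloc_inode, free_space, free_inode, transfer, ...) ... */
        .reserve_space          = dquot_reserve_space,
        .claim_space            = dquot_claim_space,
        .release_rsv            = dquot_release_reserved_space,
        .get_reserved_space     = myfs_get_reserved_space,
};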
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 0b35b3a1be0..7bc45759368 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -20,7 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
/*
* declaration of quota_function calls in kernel.
*/
-void sync_dquots(struct super_block *sb, int type);
+void sync_quota_sb(struct super_block *sb, int type);
+static inline void writeout_quota_sb(struct super_block *sb, int type)
+{
+ if (sb->s_qcop->quota_sync)
+ sb->s_qcop->quota_sync(sb, type);
+}
int dquot_initialize(struct inode *inode, int type);
int dquot_drop(struct inode *inode);
@@ -35,6 +40,11 @@ void dquot_destroy(struct dquot *dquot);
int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
int dquot_alloc_inode(const struct inode *inode, qsize_t number);
+int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
+int dquot_claim_space(struct inode *inode, qsize_t number);
+void dquot_release_reserved_space(struct inode *inode, qsize_t number);
+qsize_t dquot_get_reserved_space(struct inode *inode);
+
int dquot_free_space(struct inode *inode, qsize_t number);
int dquot_free_inode(const struct inode *inode, qsize_t number);
@@ -183,6 +193,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
return ret;
}
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+ if (sb_any_quota_active(inode->i_sb)) {
+ /* Used space is updated in alloc_space() */
+ if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
+ return 1;
+ }
+ return 0;
+}
+
static inline int vfs_dq_alloc_inode(struct inode *inode)
{
if (sb_any_quota_active(inode->i_sb)) {
@@ -193,6 +213,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode)
return 0;
}
+/*
+ * Convert in-memory reserved quotas to real consumed quotas
+ */
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+ if (sb_any_quota_active(inode->i_sb)) {
+ if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
+ return 1;
+ } else
+ inode_add_bytes(inode, nr);
+
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+/*
+ * Release reserved (in-memory) quotas
+ */
+static inline
+void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+ if (sb_any_quota_active(inode->i_sb))
+ inode->i_sb->dq_op->release_rsv(inode, nr);
+}
+
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
if (sb_any_quota_active(inode->i_sb))
@@ -213,12 +258,7 @@ static inline void vfs_dq_free_inode(struct inode *inode)
inode->i_sb->dq_op->free_inode(inode, 1);
}
-/* The following two functions cannot be called inside a transaction */
-static inline void vfs_dq_sync(struct super_block *sb)
-{
- sync_dquots(sb, -1);
-}
-
+/* Cannot be called inside a transaction */
static inline int vfs_dq_off(struct super_block *sb, int remount)
{
int ret = -ENOSYS;
@@ -294,7 +334,11 @@ static inline void vfs_dq_free_inode(struct inode *inode)
{
}
-static inline void vfs_dq_sync(struct super_block *sb)
+static inline void sync_quota_sb(struct super_block *sb, int type)
+{
+}
+
+static inline void writeout_quota_sb(struct super_block *sb, int type)
{
}
@@ -339,6 +383,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
return 0;
}
+static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+{
+ return 0;
+}
+
+static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+{
+ return vfs_dq_alloc_space(inode, nr);
+}
+
+static inline
+int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+{
+ return 0;
+}
+
static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
{
inode_sub_bytes(inode, nr);
@@ -354,67 +414,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
{
- return vfs_dq_prealloc_space_nodirty(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
{
- return vfs_dq_prealloc_space(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
{
- return vfs_dq_alloc_space_nodirty(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
{
- return vfs_dq_alloc_space(inode,
- nr << inode->i_sb->s_blocksize_bits);
+ return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
+{
+ return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
+{
+ return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
+}
+
+static inline
+void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
+{
+ vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
}
static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
{
- vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits);
+ vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
{
- vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits);
+ vfs_dq_free_space(inode, nr << inode->i_blkbits);
}
-/*
- * Define uppercase equivalents for compatibility with old function names
- * Can go away when we think all users have been converted (15/04/2008)
- */
-#define DQUOT_INIT(inode) vfs_dq_init(inode)
-#define DQUOT_DROP(inode) vfs_dq_drop(inode)
-#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \
- vfs_dq_prealloc_space_nodirty(inode, nr)
-#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr)
-#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \
- vfs_dq_alloc_space_nodirty(inode, nr)
-#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr)
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \
- vfs_dq_prealloc_block_nodirty(inode, nr)
-#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \
- vfs_dq_alloc_block_nodirty(inode, nr)
-#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr)
-#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode)
-#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \
- vfs_dq_free_space_nodirty(inode, nr)
-#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \
- vfs_dq_free_block_nodirty(inode, nr)
-#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr)
-#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode)
-#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr)
-#define DQUOT_SYNC(sb) vfs_dq_sync(sb)
-#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount)
-#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb)
-
#endif /* _LINUX_QUOTAOPS_ */
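
The intended lifecycle for the block-granularity wrappers is: reserve quota when a delayed write is accepted, then either claim it when blocks are really allocated or release it if the dirty data is dropped. A compressed sketch of that flow in a hypothetical write path (only the vfs_dq_* wrappers come from quotaops.h; the myfs_* names and the -EDQUOT mapping are illustrative):

/* Hedged sketch of the reserve -> claim/release sequence. */
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/quotaops.h>

static int myfs_delalloc_write(struct inode *inode, qsize_t nblocks)
{
        /* No blocks exist yet at write time: only reserve quota. */
        if (vfs_dq_reserve_block(inode, nblocks))
                return -EDQUOT;
        return 0;
}

static int myfs_delalloc_commit(struct inode *inode, qsize_t nblocks)
{
        /* Blocks were actually allocated: convert the reservation. */
        if (vfs_dq_claim_block(inode, nblocks))
                return -EDQUOT;
        return 0;
}

static void myfs_delalloc_abort(struct inode *inode, qsize_t nblocks)
{
        /* The dirty data was thrown away: hand the reservation back. */
        vfs_dq_release_reservation_block(inode, nblocks);
}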
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 355f6e80db0..c5da7491809 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -167,6 +167,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items);
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+ unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
deleted file mode 100644
index e98900671ca..00000000000
--- a/include/linux/raid/bitmap.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
- *
- * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
- */
-#ifndef BITMAP_H
-#define BITMAP_H 1
-
-#define BITMAP_MAJOR_LO 3
-/* version 4 insists the bitmap is in little-endian order
- * with version 3, it is host-endian which is non-portable
- */
-#define BITMAP_MAJOR_HI 4
-#define BITMAP_MAJOR_HOSTENDIAN 3
-
-#define BITMAP_MINOR 39
-
-/*
- * in-memory bitmap:
- *
- * Use 16 bit block counters to track pending writes to each "chunk".
- * The 2 high order bits are special-purpose, the first is a flag indicating
- * whether a resync is needed. The second is a flag indicating whether a
- * resync is active.
- * This means that the counter is actually 14 bits:
- *
- * +--------+--------+------------------------------------------------+
- * | resync | resync | counter |
- * | needed | active | |
- * | (0-1) | (0-1) | (0-16383) |
- * +--------+--------+------------------------------------------------+
- *
- * The "resync needed" bit is set when:
- * a '1' bit is read from storage at startup.
- * a write request fails on some drives
- * a resync is aborted on a chunk with 'resync active' set
- * It is cleared (and resync-active set) when a resync starts across all drives
- * of the chunk.
- *
- *
- * The "resync active" bit is set when:
- * a resync is started on all drives, and resync_needed is set.
- * resync_needed will be cleared (as long as resync_active wasn't already set).
- * It is cleared when a resync completes.
- *
- * The counter counts pending write requests, plus the on-disk bit.
- * When the counter is '1' and the resync bits are clear, the on-disk
- * bit can be cleared aswell, thus setting the counter to 0.
- * When we set a bit, or in the counter (to start a write), if the fields is
- * 0, we first set the disk bit and set the counter to 1.
- *
- * If the counter is 0, the on-disk bit is clear and the stipe is clean
- * Anything that dirties the stipe pushes the counter to 2 (at least)
- * and sets the on-disk bit (lazily).
- * If a periodic sweep find the counter at 2, it is decremented to 1.
- * If the sweep find the counter at 1, the on-disk bit is cleared and the
- * counter goes to zero.
- *
- * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
- * counters as a fallback when "page" memory cannot be allocated:
- *
- * Normal case (page memory allocated):
- *
- * page pointer (32-bit)
- *
- * [ ] ------+
- * |
- * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
- * c1 c2 c2048
- *
- * Hijacked case (page memory allocation failed):
- *
- * hijacked page pointer (32-bit)
- *
- * [ ][ ] (no page memory allocated)
- * counter #1 (16-bit) counter #2 (16-bit)
- *
- */
-
-#ifdef __KERNEL__
-
-#define PAGE_BITS (PAGE_SIZE << 3)
-#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
-
-typedef __u16 bitmap_counter_t;
-#define COUNTER_BITS 16
-#define COUNTER_BIT_SHIFT 4
-#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
-#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
-
-#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
-#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
-#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
-#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
-#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
-#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
-
-/* how many counters per page? */
-#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
-/* same, except a shift value for more efficient bitops */
-#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
-/* same, except a mask value for more efficient bitops */
-#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
-
-#define BITMAP_BLOCK_SIZE 512
-#define BITMAP_BLOCK_SHIFT 9
-
-/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
-#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
-#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
-
-/* when hijacked, the counters and bits represent even larger "chunks" */
-/* there will be 1024 chunks represented by each counter in the page pointers */
-#define PAGEPTR_BLOCK_RATIO(bitmap) \
- (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1)
-#define PAGEPTR_BLOCK_SHIFT(bitmap) \
- (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
-#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
-
-/*
- * on-disk bitmap:
- *
- * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
- * file a page at a time. There's a superblock at the start of the file.
- */
-
-/* map chunks (bits) to file pages - offset by the size of the superblock */
-#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
-
-#endif
-
-/*
- * bitmap structures:
- */
-
-#define BITMAP_MAGIC 0x6d746962
-
-/* use these for bitmap->flags and bitmap->sb->state bit-fields */
-enum bitmap_state {
- BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
- BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
- BITMAP_HOSTENDIAN = 0x8000,
-};
-
-/* the superblock at the front of the bitmap file -- little endian */
-typedef struct bitmap_super_s {
- __le32 magic; /* 0 BITMAP_MAGIC */
- __le32 version; /* 4 the bitmap major for now, could change... */
- __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */
- __le64 events; /* 24 event counter for the bitmap (1)*/
- __le64 events_cleared;/*32 event counter when last bit cleared (2) */
- __le64 sync_size; /* 40 the size of the md device's sync range(3) */
- __le32 state; /* 48 bitmap state information */
- __le32 chunksize; /* 52 the bitmap chunk size in bytes */
- __le32 daemon_sleep; /* 56 seconds between disk flushes */
- __le32 write_behind; /* 60 number of outstanding write-behind writes */
-
- __u8 pad[256 - 64]; /* set to zero */
-} bitmap_super_t;
-
-/* notes:
- * (1) This event counter is updated before the eventcounter in the md superblock
- * When a bitmap is loaded, it is only accepted if this event counter is equal
- * to, or one greater than, the event counter in the superblock.
- * (2) This event counter is updated when the other one is *if*and*only*if* the
- * array is not degraded. As bits are not cleared when the array is degraded,
- * this represents the last time that any bits were cleared.
- * If a device is being added that has an event count with this value or
- * higher, it is accepted as conforming to the bitmap.
- * (3)This is the number of sectors represented by the bitmap, and is the range that
- * resync happens across. For raid1 and raid5/6 it is the size of individual
- * devices. For raid10 it is the size of the array.
- */
-
-#ifdef __KERNEL__
-
-/* the in-memory bitmap is represented by bitmap_pages */
-struct bitmap_page {
- /*
- * map points to the actual memory page
- */
- char *map;
- /*
- * in emergencies (when map cannot be alloced), hijack the map
- * pointer and use it as two counters itself
- */
- unsigned int hijacked:1;
- /*
- * count of dirty bits on the page
- */
- unsigned int count:31;
-};
-
-/* keep track of bitmap file pages that have pending writes on them */
-struct page_list {
- struct list_head list;
- struct page *page;
-};
-
-/* the main bitmap structure - one per mddev */
-struct bitmap {
- struct bitmap_page *bp;
- unsigned long pages; /* total number of pages in the bitmap */
- unsigned long missing_pages; /* number of pages not yet allocated */
-
- mddev_t *mddev; /* the md device that the bitmap is for */
-
- int counter_bits; /* how many bits per block counter */
-
- /* bitmap chunksize -- how much data does each bit represent? */
- unsigned long chunksize;
- unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
- unsigned long chunks; /* total number of data chunks for the array */
-
- /* We hold a count on the chunk currently being synced, and drop
- * it when the last block is started. If the resync is aborted
- * midway, we need to be able to drop that count, so we remember
- * the counted chunk..
- */
- unsigned long syncchunk;
-
- __u64 events_cleared;
- int need_sync;
-
- /* bitmap spinlock */
- spinlock_t lock;
-
- long offset; /* offset from superblock if file is NULL */
- struct file *file; /* backing disk file */
- struct page *sb_page; /* cached copy of the bitmap file superblock */
- struct page **filemap; /* list of cache pages for the file */
- unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
- unsigned long file_pages; /* number of pages in the file */
- int last_page_size; /* bytes in the last page */
-
- unsigned long flags;
-
- int allclean;
-
- unsigned long max_write_behind; /* write-behind mode */
- atomic_t behind_writes;
-
- /*
- * the bitmap daemon - periodically wakes up and sweeps the bitmap
- * file, cleaning up bits and flushing out pages to disk as necessary
- */
- unsigned long daemon_lastrun; /* jiffies of last run */
- unsigned long daemon_sleep; /* how many seconds between updates? */
- unsigned long last_end_sync; /* when we lasted called end_sync to
- * update bitmap with resync progress */
-
- atomic_t pending_writes; /* pending writes to the bitmap file */
- wait_queue_head_t write_wait;
- wait_queue_head_t overflow_wait;
-
-};
-
-/* the bitmap API */
-
-/* these are used only by md/bitmap */
-int bitmap_create(mddev_t *mddev);
-void bitmap_flush(mddev_t *mddev);
-void bitmap_destroy(mddev_t *mddev);
-
-void bitmap_print_sb(struct bitmap *bitmap);
-void bitmap_update_sb(struct bitmap *bitmap);
-
-int bitmap_setallbits(struct bitmap *bitmap);
-void bitmap_write_all(struct bitmap *bitmap);
-
-void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
-
-/* these are exported */
-int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int behind);
-void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int success, int behind);
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
-void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
-void bitmap_close_sync(struct bitmap *bitmap);
-void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
-
-void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(struct bitmap *bitmap);
-#endif
-
-#endif
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
deleted file mode 100644
index f38b9c586af..00000000000
--- a/include/linux/raid/linear.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _LINEAR_H
-#define _LINEAR_H
-
-#include <linux/raid/md.h>
-
-struct dev_info {
- mdk_rdev_t *rdev;
- sector_t num_sectors;
- sector_t start_sector;
-};
-
-typedef struct dev_info dev_info_t;
-
-struct linear_private_data
-{
- struct linear_private_data *prev; /* earlier version */
- dev_info_t **hash_table;
- sector_t spacing;
- sector_t array_sectors;
- int sector_shift; /* shift before dividing
- * by spacing
- */
- dev_info_t disks[0];
-};
-
-
-typedef struct linear_private_data linear_conf_t;
-
-#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
-
-#endif
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
deleted file mode 100644
index 82bea14cae1..00000000000
--- a/include/linux/raid/md.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- md.h : Multiple Devices driver for Linux
- Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
- Copyright (C) 1994-96 Marc ZYNGIER
- <zyngier@ufr-info-p7.ibp.fr> or
- <maz@gloups.fdn.fr>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef _MD_H
-#define _MD_H
-
-#include <linux/blkdev.h>
-#include <linux/seq_file.h>
-
-/*
- * 'md_p.h' holds the 'physical' layout of RAID devices
- * 'md_u.h' holds the user <=> kernel API
- *
- * 'md_k.h' holds kernel internal definitions
- */
-
-#include <linux/raid/md_p.h>
-#include <linux/raid/md_u.h>
-#include <linux/raid/md_k.h>
-
-#ifdef CONFIG_MD
-
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define MD_MAJOR_VERSION 0
-#define MD_MINOR_VERSION 90
-/*
- * MD_PATCHLEVEL_VERSION indicates kernel functionality.
- * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
- * and major_version/minor_version accordingly
- * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
- * in the super status byte
- * >=3 means that bitmap superblock version 4 is supported, which uses
- * little-ending representation rather than host-endian
- */
-#define MD_PATCHLEVEL_VERSION 3
-
-extern int mdp_major;
-
-extern int register_md_personality(struct mdk_personality *p);
-extern int unregister_md_personality(struct mdk_personality *p);
-extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
- mddev_t *mddev, const char *name);
-extern void md_unregister_thread(mdk_thread_t *thread);
-extern void md_wakeup_thread(mdk_thread_t *thread);
-extern void md_check_recovery(mddev_t *mddev);
-extern void md_write_start(mddev_t *mddev, struct bio *bi);
-extern void md_write_end(mddev_t *mddev);
-extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
-extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
-
-extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
- sector_t sector, int size, struct page *page);
-extern void md_super_wait(mddev_t *mddev);
-extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
- struct page *page, int rw);
-extern void md_do_sync(mddev_t *mddev);
-extern void md_new_event(mddev_t *mddev);
-extern int md_allow_write(mddev_t *mddev);
-extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
-
-#endif /* CONFIG_MD */
-#endif
-
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
deleted file mode 100644
index 9743e4dbc91..00000000000
--- a/include/linux/raid/md_k.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- md_k.h : kernel internal structure of the Linux MD driver
- Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef _MD_K_H
-#define _MD_K_H
-
-/* and dm-bio-list.h is not under include/linux because.... ??? */
-#include "../../../drivers/md/dm-bio-list.h"
-
-#ifdef CONFIG_BLOCK
-
-#define LEVEL_MULTIPATH (-4)
-#define LEVEL_LINEAR (-1)
-#define LEVEL_FAULTY (-5)
-
-/* we need a value for 'no level specified' and 0
- * means 'raid0', so we need something else. This is
- * for internal use only
- */
-#define LEVEL_NONE (-1000000)
-
-#define MaxSector (~(sector_t)0)
-
-typedef struct mddev_s mddev_t;
-typedef struct mdk_rdev_s mdk_rdev_t;
-
-/*
- * options passed in raidrun:
- */
-
-/* Currently this must fit in an 'int' */
-#define MAX_CHUNK_SIZE (1<<30)
-
-/*
- * MD's 'extended' device
- */
-struct mdk_rdev_s
-{
- struct list_head same_set; /* RAID devices within the same set */
-
- sector_t size; /* Device size (in blocks) */
- mddev_t *mddev; /* RAID array if running */
- long last_events; /* IO event timestamp */
-
- struct block_device *bdev; /* block device handle */
-
- struct page *sb_page;
- int sb_loaded;
- __u64 sb_events;
- sector_t data_offset; /* start of data in array */
- sector_t sb_start; /* offset of the super block (in 512byte sectors) */
- int sb_size; /* bytes in the superblock */
- int preferred_minor; /* autorun support */
-
- struct kobject kobj;
-
- /* A device can be in one of three states based on two flags:
- * Not working: faulty==1 in_sync==0
- * Fully working: faulty==0 in_sync==1
- * Working, but not
- * in sync with array
- * faulty==0 in_sync==0
- *
- * It can never have faulty==1, in_sync==1
- * This reduces the burden of testing multiple flags in many cases
- */
-
- unsigned long flags;
-#define Faulty 1 /* device is known to have a fault */
-#define In_sync 2 /* device is in_sync with rest of array */
-#define WriteMostly 4 /* Avoid reading if at all possible */
-#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
-#define AllReserved 6 /* If whole device is reserved for
- * one array */
-#define AutoDetected 7 /* added by auto-detect */
-#define Blocked 8 /* An error occured on an externally
- * managed array, don't allow writes
- * until it is cleared */
-#define StateChanged 9 /* Faulty or Blocked has changed during
- * interrupt, so it needs to be
- * notified by the thread */
- wait_queue_head_t blocked_wait;
-
- int desc_nr; /* descriptor index in the superblock */
- int raid_disk; /* role of device in array */
- int saved_raid_disk; /* role that device used to have in the
- * array and could again if we did a partial
- * resync from the bitmap
- */
- sector_t recovery_offset;/* If this device has been partially
- * recovered, this is where we were
- * up to.
- */
-
- atomic_t nr_pending; /* number of pending requests.
- * only maintained for arrays that
- * support hot removal
- */
- atomic_t read_errors; /* number of consecutive read errors that
- * we have tried to ignore.
- */
- atomic_t corrected_errors; /* number of corrected read errors,
- * for reporting to userspace and storing
- * in superblock.
- */
- struct work_struct del_work; /* used for delayed sysfs removal */
-
- struct sysfs_dirent *sysfs_state; /* handle for 'state'
- * sysfs entry */
-};
-
-struct mddev_s
-{
- void *private;
- struct mdk_personality *pers;
- dev_t unit;
- int md_minor;
- struct list_head disks;
- unsigned long flags;
-#define MD_CHANGE_DEVS 0 /* Some device status has changed */
-#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2 /* superblock update in progress */
-
- int ro;
-
- struct gendisk *gendisk;
-
- struct kobject kobj;
- int hold_active;
-#define UNTIL_IOCTL 1
-#define UNTIL_STOP 2
-
- /* Superblock information */
- int major_version,
- minor_version,
- patch_version;
- int persistent;
- int external; /* metadata is
- * managed externally */
- char metadata_type[17]; /* externally set*/
- int chunk_size;
- time_t ctime, utime;
- int level, layout;
- char clevel[16];
- int raid_disks;
- int max_disks;
- sector_t size; /* used size of component devices */
- sector_t array_sectors; /* exported array size */
- __u64 events;
-
- char uuid[16];
-
- /* If the array is being reshaped, we need to record the
- * new shape and an indication of where we are up to.
- * This is written to the superblock.
- * If reshape_position is MaxSector, then no reshape is happening (yet).
- */
- sector_t reshape_position;
- int delta_disks, new_level, new_layout, new_chunk;
-
- struct mdk_thread_s *thread; /* management thread */
- struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
- sector_t curr_resync; /* last block scheduled */
- unsigned long resync_mark; /* a recent timestamp */
- sector_t resync_mark_cnt;/* blocks written at resync_mark */
- sector_t curr_mark_cnt; /* blocks scheduled now */
-
- sector_t resync_max_sectors; /* may be set by personality */
-
- sector_t resync_mismatches; /* count of sectors where
- * parity/replica mismatch found
- */
-
- /* allow user-space to request suspension of IO to regions of the array */
- sector_t suspend_lo;
- sector_t suspend_hi;
- /* if zero, use the system-wide default */
- int sync_speed_min;
- int sync_speed_max;
-
- /* resync even though the same disks are shared among md-devices */
- int parallel_resync;
-
- int ok_start_degraded;
- /* recovery/resync flags
- * NEEDED: we might need to start a resync/recover
- * RUNNING: a thread is running, or about to be started
- * SYNC: actually doing a resync, not a recovery
- * RECOVER: doing recovery, or need to try it.
- * INTR: resync needs to be aborted for some reason
- * DONE: thread is done and is waiting to be reaped
- * REQUEST: user-space has requested a sync (used with SYNC)
- * CHECK: user-space request for for check-only, no repair
- * RESHAPE: A reshape is happening
- *
- * If neither SYNC or RESHAPE are set, then it is a recovery.
- */
-#define MD_RECOVERY_RUNNING 0
-#define MD_RECOVERY_SYNC 1
-#define MD_RECOVERY_RECOVER 2
-#define MD_RECOVERY_INTR 3
-#define MD_RECOVERY_DONE 4
-#define MD_RECOVERY_NEEDED 5
-#define MD_RECOVERY_REQUESTED 6
-#define MD_RECOVERY_CHECK 7
-#define MD_RECOVERY_RESHAPE 8
-#define MD_RECOVERY_FROZEN 9
-
- unsigned long recovery;
- int recovery_disabled; /* if we detect that recovery
- * will always fail, set this
- * so we don't loop trying */
-
- int in_sync; /* know to not need resync */
- struct mutex reconfig_mutex;
- atomic_t active; /* general refcount */
- atomic_t openers; /* number of active opens */
-
- int changed; /* true if we might need to reread partition info */
- int degraded; /* whether md should consider
- * adding a spare
- */
- int barriers_work; /* initialised to true, cleared as soon
- * as a barrier request to slave
- * fails. Only supported
- */
- struct bio *biolist; /* bios that need to be retried
- * because BIO_RW_BARRIER is not supported
- */
-
- atomic_t recovery_active; /* blocks scheduled, but not written */
- wait_queue_head_t recovery_wait;
- sector_t recovery_cp;
- sector_t resync_min; /* user requested sync
- * starts here */
- sector_t resync_max; /* resync should pause
- * when it gets here */
-
- struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
- * file in sysfs.
- */
- struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
-
- struct work_struct del_work; /* used for delayed sysfs removal */
-
- spinlock_t write_lock;
- wait_queue_head_t sb_wait; /* for waiting on superblock updates */
- atomic_t pending_writes; /* number of active superblock writes */
-
- unsigned int safemode; /* if set, update "clean" superblock
- * when no writes pending.
- */
- unsigned int safemode_delay;
- struct timer_list safemode_timer;
- atomic_t writes_pending;
- struct request_queue *queue; /* for plugging ... */
-
- atomic_t write_behind; /* outstanding async IO */
- unsigned int max_write_behind; /* 0 = sync */
-
- struct bitmap *bitmap; /* the bitmap for the device */
- struct file *bitmap_file; /* the bitmap file */
- long bitmap_offset; /* offset from superblock of
- * start of bitmap. May be
- * negative, but not '0'
- */
- long default_bitmap_offset; /* this is the offset to use when
- * hot-adding a bitmap. It should
- * eventually be settable by sysfs.
- */
-
- struct list_head all_mddevs;
-};
-
-
-static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
-{
- int faulty = test_bit(Faulty, &rdev->flags);
- if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-}
-
-static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
-{
- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
-}
-
-struct mdk_personality
-{
- char *name;
- int level;
- struct list_head list;
- struct module *owner;
- int (*make_request)(struct request_queue *q, struct bio *bio);
- int (*run)(mddev_t *mddev);
- int (*stop)(mddev_t *mddev);
- void (*status)(struct seq_file *seq, mddev_t *mddev);
- /* error_handler must set ->faulty and clear ->in_sync
- * if appropriate, and should abort recovery if needed
- */
- void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
- int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
- int (*hot_remove_disk) (mddev_t *mddev, int number);
- int (*spare_active) (mddev_t *mddev);
- sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
- int (*resize) (mddev_t *mddev, sector_t sectors);
- int (*check_reshape) (mddev_t *mddev);
- int (*start_reshape) (mddev_t *mddev);
- int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
- /* quiesce moves between quiescence states
- * 0 - fully active
- * 1 - no new requests allowed
- * others - reserved
- */
- void (*quiesce) (mddev_t *mddev, int state);
-};
-
-
-struct md_sysfs_entry {
- struct attribute attr;
- ssize_t (*show)(mddev_t *, char *);
- ssize_t (*store)(mddev_t *, const char *, size_t);
-};
-
-
-static inline char * mdname (mddev_t * mddev)
-{
- return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
-}
-
-/*
- * iterates through some rdev ringlist. It's safe to remove the
- * current 'rdev'. Dont touch 'tmp' though.
- */
-#define rdev_for_each_list(rdev, tmp, head) \
- list_for_each_entry_safe(rdev, tmp, head, same_set)
-
-/*
- * iterates through the 'same array disks' ringlist
- */
-#define rdev_for_each(rdev, tmp, mddev) \
- list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
-
-#define rdev_for_each_rcu(rdev, mddev) \
- list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
-
-typedef struct mdk_thread_s {
- void (*run) (mddev_t *mddev);
- mddev_t *mddev;
- wait_queue_head_t wqueue;
- unsigned long flags;
- struct task_struct *tsk;
- unsigned long timeout;
-} mdk_thread_t;
-
-#define THREAD_WAKEUP 0
-
-#define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while (0)
-
-#define wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- if (condition) \
- break; \
- __wait_event_lock_irq(wq, condition, lock, cmd); \
-} while (0)
-
-static inline void safe_put_page(struct page *p)
-{
- if (p) put_page(p);
-}
-
-#endif /* CONFIG_BLOCK */
-#endif
-
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h
index 6ba830fa853..ffa2efbbe38 100644
--- a/include/linux/raid/md_p.h
+++ b/include/linux/raid/md_p.h
@@ -232,7 +232,7 @@ struct mdp_superblock_1 {
__le64 reshape_position; /* next address in array-space for reshape */
__le32 delta_disks; /* change in number of raid_disks */
__le32 new_layout; /* new layout */
- __le32 new_chunk; /* new chunk size (bytes) */
+ __le32 new_chunk; /* new chunk size (512byte sectors) */
__u8 pad1[128-124]; /* set to 0 when written */
/* constant this-device information - 64 bytes */
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
index 7192035fc4b..fb1abb3367e 100644
--- a/include/linux/raid/md_u.h
+++ b/include/linux/raid/md_u.h
@@ -15,6 +15,24 @@
#ifndef _MD_U_H
#define _MD_U_H
+/*
+ * Different major versions are not compatible.
+ * Different minor versions are only downward compatible.
+ * Different patchlevel versions are downward and upward compatible.
+ */
+#define MD_MAJOR_VERSION 0
+#define MD_MINOR_VERSION 90
+/*
+ * MD_PATCHLEVEL_VERSION indicates kernel functionality.
+ * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
+ * and major_version/minor_version accordingly
+ * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
+ * in the super status byte
+ * >=3 means that bitmap superblock version 4 is supported, which uses
+ * little-endian representation rather than host-endian
+ */
+#define MD_PATCHLEVEL_VERSION 3
+
/* ioctls */
/* status */
@@ -46,6 +64,12 @@
#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
+/* 63 partitions with the alternate major number (mdp) */
+#define MdpMinorShift 6
+#ifdef __KERNEL__
+extern int mdp_major;
+#endif
+
typedef struct mdu_version_s {
int major;
int minor;
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s {
} mdu_array_info_t;
+/* non-obvious values for 'level' */
+#define LEVEL_MULTIPATH (-4)
+#define LEVEL_LINEAR (-1)
+#define LEVEL_FAULTY (-5)
+
+/* we need a value for 'no level specified' and 0
+ * means 'raid0', so we need something else. This is
+ * for internal use only
+ */
+#define LEVEL_NONE (-1000000)
+
typedef struct mdu_disk_info_s {
/*
* configuration/status of one particular disk
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h
deleted file mode 100644
index 6f53fc177a4..00000000000
--- a/include/linux/raid/multipath.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _MULTIPATH_H
-#define _MULTIPATH_H
-
-#include <linux/raid/md.h>
-
-struct multipath_info {
- mdk_rdev_t *rdev;
-};
-
-struct multipath_private_data {
- mddev_t *mddev;
- struct multipath_info *multipaths;
- int raid_disks;
- int working_disks;
- spinlock_t device_lock;
- struct list_head retry_list;
-
- mempool_t *pool;
-};
-
-typedef struct multipath_private_data multipath_conf_t;
-
-/*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
-
-/*
- * this is our 'private' 'collective' MULTIPATH buffer head.
- * it contains information about what kind of IO operations were started
- * for this MULTIPATH operation, and about their status:
- */
-
-struct multipath_bh {
- mddev_t *mddev;
- struct bio *master_bio;
- struct bio bio;
- int path;
- struct list_head retry_list;
-};
-#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
new file mode 100644
index 00000000000..d92480f8285
--- /dev/null
+++ b/include/linux/raid/pq.h
@@ -0,0 +1,132 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2003 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+#ifndef LINUX_RAID_RAID6_H
+#define LINUX_RAID_RAID6_H
+
+#ifdef __KERNEL__
+
+/* Set to 1 to use kernel-wide empty_zero_page */
+#define RAID6_USE_EMPTY_ZERO_PAGE 0
+#include <linux/blkdev.h>
+
+/* We need a pre-zeroed page... if we don't want to use the kernel-provided
+ one define it here */
+#if RAID6_USE_EMPTY_ZERO_PAGE
+# define raid6_empty_zero_page empty_zero_page
+#else
+extern const char raid6_empty_zero_page[PAGE_SIZE];
+#endif
+
+#else /* ! __KERNEL__ */
+/* Used for testing in user space */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+/* Not standard, but glibc defines it */
+#define BITS_PER_LONG __WORDSIZE
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+#ifndef PAGE_SIZE
+# define PAGE_SIZE 4096
+#endif
+extern const char raid6_empty_zero_page[PAGE_SIZE];
+
+#define __init
+#define __exit
+#define __attribute_const__ __attribute__((const))
+#define noinline __attribute__((noinline))
+
+#define preempt_enable()
+#define preempt_disable()
+#define cpu_has_feature(x) 1
+#define enable_kernel_altivec()
+#define disable_kernel_altivec()
+
+#define EXPORT_SYMBOL(sym)
+#define MODULE_LICENSE(licence)
+#define subsys_initcall(x)
+#define module_exit(x)
+#endif /* __KERNEL__ */
+
+/* Routine choices */
+struct raid6_calls {
+ void (*gen_syndrome)(int, size_t, void **);
+ int (*valid)(void); /* Returns 1 if this routine set is usable */
+ const char *name; /* Name of this routine set */
+ int prefer; /* Has special performance attribute */
+};
+
+/* Selected algorithm */
+extern struct raid6_calls raid6_call;
+
+/* Algorithm list */
+extern const struct raid6_calls * const raid6_algos[];
+int raid6_select_algo(void);
+
+/* Return values from chk_syndrome */
+#define RAID6_OK 0
+#define RAID6_P_BAD 1
+#define RAID6_Q_BAD 2
+#define RAID6_PQ_BAD 3
+
+/* Galois field tables */
+extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
+extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
+
+/* Recovery routines */
+void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs);
+void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+
+/* Some definitions to allow code to be compiled for testing in userspace */
+#ifndef __KERNEL__
+
+# define jiffies raid6_jiffies()
+# define printk printf
+# define GFP_KERNEL 0
+# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANONYMOUS,\
+ 0, 0))
+# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE)
+
+static inline void cpu_relax(void)
+{
+ /* Nothing */
+}
+
+#undef HZ
+#define HZ 1000
+static inline uint32_t raid6_jiffies(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return tv.tv_sec*1000 + tv.tv_usec/1000;
+}
+
+#endif /* ! __KERNEL__ */
+
+#endif /* LINUX_RAID_RAID6_H */
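
The exported raid6_call and its gen_syndrome hook are enough to compute P/Q parity over one stripe. A sketch follows; the disk count, the page source and the convention that the last two pointers receive P and Q (as the md driver uses them) are assumptions, and raid6_select_algo() is taken to have already chosen raid6_call.

/* Hedged sketch: generate RAID-6 P/Q syndromes for one PAGE_SIZE stripe. */
#include <linux/mm.h>
#include <linux/raid/pq.h>

#define NDISKS 6        /* e.g. 4 data disks + P + Q */

static void compute_pq(struct page **pages)     /* NDISKS pages */
{
        void *ptrs[NDISKS];
        int i;

        for (i = 0; i < NDISKS; i++)
                ptrs[i] = page_address(pages[i]);

        /* ptrs[0..NDISKS-3] are data; ptrs[NDISKS-2] gets P, ptrs[NDISKS-1] gets Q */
        raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, ptrs);
}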
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
deleted file mode 100644
index fd42aa87c39..00000000000
--- a/include/linux/raid/raid0.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _RAID0_H
-#define _RAID0_H
-
-#include <linux/raid/md.h>
-
-struct strip_zone
-{
- sector_t zone_start; /* Zone offset in md_dev (in sectors) */
- sector_t dev_start; /* Zone offset in real dev (in sectors) */
- sector_t sectors; /* Zone size in sectors */
- int nb_dev; /* # of devices attached to the zone */
- mdk_rdev_t **dev; /* Devices attached to the zone */
-};
-
-struct raid0_private_data
-{
- struct strip_zone **hash_table; /* Table of indexes into strip_zone */
- struct strip_zone *strip_zone;
- mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
- int nr_strip_zones;
-
- sector_t spacing;
- int sector_shift; /* shift this before divide by spacing */
-};
-
-typedef struct raid0_private_data raid0_conf_t;
-
-#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
-
-#endif
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
deleted file mode 100644
index 0a9ba7c3302..00000000000
--- a/include/linux/raid/raid1.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef _RAID1_H
-#define _RAID1_H
-
-#include <linux/raid/md.h>
-
-typedef struct mirror_info mirror_info_t;
-
-struct mirror_info {
- mdk_rdev_t *rdev;
- sector_t head_position;
-};
-
-/*
- * memory pools need a pointer to the mddev, so they can force an unplug
- * when memory is tight, and a count of the number of drives that the
- * pool was allocated for, so they know how much to allocate and free.
- * mddev->raid_disks cannot be used, as it can change while a pool is active
- * These two datums are stored in a kmalloced struct.
- */
-
-struct pool_info {
- mddev_t *mddev;
- int raid_disks;
-};
-
-
-typedef struct r1bio_s r1bio_t;
-
-struct r1_private_data_s {
- mddev_t *mddev;
- mirror_info_t *mirrors;
- int raid_disks;
- int last_used;
- sector_t next_seq_sect;
- spinlock_t device_lock;
-
- struct list_head retry_list;
- /* queue pending writes and submit them on unplug */
- struct bio_list pending_bio_list;
- /* queue of writes that have been unplugged */
- struct bio_list flushing_bio_list;
-
- /* for use when syncing mirrors: */
-
- spinlock_t resync_lock;
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
- sector_t next_resync;
- int fullsync; /* set to 1 if a full sync is needed,
- * (fresh device added).
- * Cleared when a sync completes.
- */
-
- wait_queue_head_t wait_barrier;
-
- struct pool_info *poolinfo;
-
- struct page *tmppage;
-
- mempool_t *r1bio_pool;
- mempool_t *r1buf_pool;
-};
-
-typedef struct r1_private_data_s conf_t;
-
-/*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
-
-/*
- * this is our 'private' RAID1 bio.
- *
- * it contains information about what kind of IO operations were started
- * for this RAID1 operation, and about their status:
- */
-
-struct r1bio_s {
- atomic_t remaining; /* 'have we finished' count,
- * used from IRQ handlers
- */
- atomic_t behind_remaining; /* number of write-behind ios remaining
- * in this BehindIO request
- */
- sector_t sector;
- int sectors;
- unsigned long state;
- mddev_t *mddev;
- /*
- * original bio going to /dev/mdx
- */
- struct bio *master_bio;
- /*
- * if the IO is in READ direction, then this is where we read
- */
- int read_disk;
-
- struct list_head retry_list;
- struct bitmap_update *bitmap_update;
- /*
- * if the IO is in WRITE direction, then multiple bios are used.
- * We choose the number when they are allocated.
- */
- struct bio *bios[0];
- /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
-};
-
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio*)1)
-
-/* bits for r1bio.state */
-#define R1BIO_Uptodate 0
-#define R1BIO_IsSync 1
-#define R1BIO_Degraded 2
-#define R1BIO_BehindIO 3
-#define R1BIO_Barrier 4
-#define R1BIO_BarrierRetry 5
-/* For write-behind requests, we call bi_end_io when
- * the last non-write-behind device completes, providing
- * any write was successful. Otherwise we call when
- * any write-behind write succeeds, otherwise we call
- * with failure when last write completes (and all failed).
- * Record that bi_end_io was called with this flag...
- */
-#define R1BIO_Returned 6
-
-#endif
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
deleted file mode 100644
index e9091cfeb28..00000000000
--- a/include/linux/raid/raid10.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef _RAID10_H
-#define _RAID10_H
-
-#include <linux/raid/md.h>
-
-typedef struct mirror_info mirror_info_t;
-
-struct mirror_info {
- mdk_rdev_t *rdev;
- sector_t head_position;
-};
-
-typedef struct r10bio_s r10bio_t;
-
-struct r10_private_data_s {
- mddev_t *mddev;
- mirror_info_t *mirrors;
- int raid_disks;
- spinlock_t device_lock;
-
- /* geometry */
- int near_copies; /* number of copies laid out raid0 style */
- int far_copies; /* number of copies laid out
- * at large strides across drives
- */
- int far_offset; /* far_copies are offset by 1 stripe
- * instead of many
- */
- int copies; /* near_copies * far_copies.
- * must be <= raid_disks
- */
- sector_t stride; /* distance between far copies.
- * This is size / far_copies unless
- * far_offset, in which case it is
- * 1 stripe.
- */
-
- int chunk_shift; /* shift from chunks to sectors */
- sector_t chunk_mask;
-
- struct list_head retry_list;
- /* queue pending writes and submit them on unplug */
- struct bio_list pending_bio_list;
-
-
- spinlock_t resync_lock;
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
- sector_t next_resync;
- int fullsync; /* set to 1 if a full sync is needed,
- * (fresh device added).
- * Cleared when a sync completes.
- */
-
- wait_queue_head_t wait_barrier;
-
- mempool_t *r10bio_pool;
- mempool_t *r10buf_pool;
- struct page *tmppage;
-};
-
-typedef struct r10_private_data_s conf_t;
-
-/*
- * this is the only point in the RAID code where we violate
- * C type safety. mddev->private is an 'opaque' pointer.
- */
-#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
-
-/*
- * this is our 'private' RAID10 bio.
- *
- * it contains information about what kind of IO operations were started
- * for this RAID10 operation, and about their status:
- */
-
-struct r10bio_s {
- atomic_t remaining; /* 'have we finished' count,
- * used from IRQ handlers
- */
- sector_t sector; /* virtual sector number */
- int sectors;
- unsigned long state;
- mddev_t *mddev;
- /*
- * original bio going to /dev/mdx
- */
- struct bio *master_bio;
- /*
- * if the IO is in READ direction, then this is where we read
- */
- int read_slot;
-
- struct list_head retry_list;
- /*
- * if the IO is in WRITE direction, then multiple bios are used,
- * one for each copy.
- * When resyncing we also use one for each copy.
- * When reconstructing, we use 2 bios, one for read, one for write.
- * We choose the number when they are allocated.
- */
- struct {
- struct bio *bio;
- sector_t addr;
- int devnum;
- } devs[0];
-};
-
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio*)1)
-
-/* bits for r10bio.state */
-#define R10BIO_Uptodate 0
-#define R10BIO_IsSync 1
-#define R10BIO_IsRecover 2
-#define R10BIO_Degraded 3
-#endif
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
deleted file mode 100644
index 3b267279245..00000000000
--- a/include/linux/raid/raid5.h
+++ /dev/null
@@ -1,402 +0,0 @@
-#ifndef _RAID5_H
-#define _RAID5_H
-
-#include <linux/raid/md.h>
-#include <linux/raid/xor.h>
-
-/*
- *
- * Each stripe contains one buffer per disc. Each buffer can be in
- * one of a number of states stored in "flags". Changes between
- * these states happen *almost* exclusively under a per-stripe
- * spinlock. Some very specific changes can happen in bi_end_io, and
- * these are not protected by the spin lock.
- *
- * The flag bits that are used to represent these states are:
- * R5_UPTODATE and R5_LOCKED
- *
- * State Empty == !UPTODATE, !LOCK
- * We have no data, and there is no active request
- * State Want == !UPTODATE, LOCK
- * A read request is being submitted for this block
- * State Dirty == UPTODATE, LOCK
- * Some new data is in this buffer, and it is being written out
- * State Clean == UPTODATE, !LOCK
- * We have valid data which is the same as on disc
- *
- * The possible state transitions are:
- *
- * Empty -> Want - on read or write to get old data for parity calc
- * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
- * Empty -> Clean - on compute_block when computing a block for failed drive
- * Want -> Empty - on failed read
- * Want -> Clean - on successful completion of read request
- * Dirty -> Clean - on successful completion of write request
- * Dirty -> Clean - on failed write
- * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
- *
- * The Want->Empty, Want->Clean, Dirty->Clean, transitions
- * all happen in b_end_io at interrupt time.
- * Each sets the Uptodate bit before releasing the Lock bit.
- * This leaves one multi-stage transition:
- * Want->Dirty->Clean
- * This is safe because thinking that a Clean buffer is actually dirty
- * will at worst delay some action, and the stripe will be scheduled
- * for attention after the transition is complete.
- *
- * There is one possibility that is not covered by these states. That
- * is if one drive has failed and there is a spare being rebuilt. We
- * can't distinguish between a clean block that has been generated
- * from parity calculations, and a clean block that has been
- * successfully written to the spare ( or to parity when resyncing).
- * To distinguish these states we have a stripe bit STRIPE_INSYNC that
- * is set whenever a write is scheduled to the spare, or to the parity
- * disc if there is no spare. A sync request clears this bit, and
- * when we find it set with no buffers locked, we know the sync is
- * complete.
- *
- * Buffers for the md device that arrive via make_request are attached
- * to the appropriate stripe in one of two lists linked on b_reqnext.
- * One list (bh_read) for read requests, one (bh_write) for write.
- * There should never be more than one buffer on the two lists
- * together, but we are not guaranteed of that so we allow for more.
- *
- * If a buffer is on the read list when the associated cache buffer is
- * Uptodate, the data is copied into the read buffer and its b_end_io
- * routine is called. This may happen in the end_request routine only
- * if the buffer has just successfully been read. end_request should
- * remove the buffers from the list and then set the Uptodate bit on
- * the buffer. Other threads may do this only if they first check
- * that the Uptodate bit is set. Once they have checked that they may
- * take buffers off the read queue.
- *
- * When a buffer on the write list is committed for write it is copied
- * into the cache buffer, which is then marked dirty, and moved onto a
- * third list, the written list (bh_written). Once both the parity
- * block and the cached buffer are successfully written, any buffer on
- * a written list can be returned with b_end_io.
- *
- * The write list and read list both act as fifos. The read list is
- * protected by the device_lock. The write and written lists are
- * protected by the stripe lock. The device_lock, which can be
- * claimed while the stripe lock is held, is only for list
- * manipulations and will only be held for a very short time. It can
- * be claimed from interrupts.
- *
- *
- * Stripes in the stripe cache can be on one of two lists (or on
- * neither). The "inactive_list" contains stripes which are not
- * currently being used for any request. They can freely be reused
- * for another stripe. The "handle_list" contains stripes that need
- * to be handled in some way. Both of these are fifo queues. Each
- * stripe is also (potentially) linked to a hash bucket in the hash
- * table so that it can be found by sector number. Stripes that are
- * not hashed must be on the inactive_list, and will normally be at
- * the front. All stripes start life this way.
- *
- * The inactive_list, handle_list and hash bucket lists are all protected by the
- * device_lock.
- * - stripes on the inactive_list never have their stripe_lock held.
- * - stripes have a reference counter. If count==0, they are on a list.
- * - If a stripe might need handling, STRIPE_HANDLE is set.
- * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
- * handle_list else inactive_list
- *
- * This, combined with the fact that STRIPE_HANDLE is only ever
- * cleared while a stripe has a non-zero count means that if the
- * refcount is 0 and STRIPE_HANDLE is set, then it is on the
- * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
- * the stripe is on inactive_list.
- *
- * The possible transitions are:
- * activate an unhashed/inactive stripe (get_active_stripe())
- * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
- * activate a hashed, possibly active stripe (get_active_stripe())
- * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
- * attach a request to an active stripe (add_stripe_bh())
- * lockdev attach-buffer unlockdev
- * handle a stripe (handle_stripe())
- * lockstripe clrSTRIPE_HANDLE ...
- * (lockdev check-buffers unlockdev) ..
- * change-state ..
- * record io/ops needed unlockstripe schedule io/ops
- * release an active stripe (release_stripe())
- * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
- *
- * The refcount counts each thread that has activated the stripe,
- * plus raid5d if it is handling it, plus one for each active request
- * on a cached buffer, and plus one if the stripe is undergoing stripe
- * operations.
- *
- * Stripe operations are performed outside the stripe lock,
- * the stripe operations are:
- * -copying data between the stripe cache and user application buffers
- * -computing blocks to save a disk access, or to recover a missing block
- * -updating the parity on a write operation (reconstruct write and
- * read-modify-write)
- * -checking parity correctness
- * -running i/o to disk
- * These operations are carried out by raid5_run_ops which uses the async_tx
- * api to (optionally) offload operations to dedicated hardware engines.
- * When requesting an operation handle_stripe sets the pending bit for the
- * operation and increments the count. raid5_run_ops is then run whenever
- * the count is non-zero.
- * There are some critical dependencies between the operations that prevent some
- * from being requested while another is in flight.
- * 1/ Parity check operations destroy the in cache version of the parity block,
- * so we prevent parity dependent operations like writes and compute_blocks
- * from starting while a check is in progress. Some dma engines can perform
- * the check without damaging the parity block, in these cases the parity
- * block is re-marked up to date (assuming the check was successful) and is
- * not re-read from disk.
- * 2/ When a write operation is requested we immediately lock the affected
- * blocks, and mark them as not up to date. This causes new read requests
- * to be held off, as well as parity checks and compute block operations.
- * 3/ Once a compute block operation has been requested handle_stripe treats
- * that block as if it is up to date. raid5_run_ops guarantees that any
- * operation that is dependent on the compute block result is initiated after
- * the compute block completes.
- */
-
-/*
- * Operations state - intermediate states that are visible outside of sh->lock
- * In general _idle indicates nothing is running, _run indicates a data
- * processing operation is active, and _result means the data processing result
- * is stable and can be acted upon. For simple operations like biofill and
- * compute that only have an _idle and _run state they are indicated with
- * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
- */
-/**
- * enum check_states - handles syncing / repairing a stripe
- * @check_state_idle - check operations are quiesced
- * @check_state_run - check operation is running
- * @check_state_result - set outside lock when check result is valid
- * @check_state_compute_run - check failed and we are repairing
- * @check_state_compute_result - set outside lock when compute result is valid
- */
-enum check_states {
- check_state_idle = 0,
- check_state_run, /* parity check */
- check_state_check_result,
- check_state_compute_run, /* parity repair */
- check_state_compute_result,
-};
-
-/**
- * enum reconstruct_states - handles writing or expanding a stripe
- */
-enum reconstruct_states {
- reconstruct_state_idle = 0,
- reconstruct_state_prexor_drain_run, /* prexor-write */
- reconstruct_state_drain_run, /* write */
- reconstruct_state_run, /* expand */
- reconstruct_state_prexor_drain_result,
- reconstruct_state_drain_result,
- reconstruct_state_result,
-};
-
-struct stripe_head {
- struct hlist_node hash;
- struct list_head lru; /* inactive_list or handle_list */
- struct raid5_private_data *raid_conf;
- sector_t sector; /* sector of this row */
- int pd_idx; /* parity disk index */
- unsigned long state; /* state flags */
- atomic_t count; /* nr of active thread/requests */
- spinlock_t lock;
- int bm_seq; /* sequence number for bitmap flushes */
- int disks; /* disks in stripe */
- enum check_states check_state;
- enum reconstruct_states reconstruct_state;
- /* stripe_operations
- * @target - STRIPE_OP_COMPUTE_BLK target
- */
- struct stripe_operations {
- int target;
- u32 zero_sum_result;
- } ops;
- struct r5dev {
- struct bio req;
- struct bio_vec vec;
- struct page *page;
- struct bio *toread, *read, *towrite, *written;
- sector_t sector; /* sector of this page */
- unsigned long flags;
- } dev[1]; /* allocated with extra space depending on RAID geometry */
-};
-
-/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
- * for handle_stripe. It is only valid under spin_lock(sh->lock);
- */
-struct stripe_head_state {
- int syncing, expanding, expanded;
- int locked, uptodate, to_read, to_write, failed, written;
- int to_fill, compute, req_compute, non_overwrite;
- int failed_num;
- unsigned long ops_request;
-};
-
-/* r6_state - extra state data only relevant to r6 */
-struct r6_state {
- int p_failed, q_failed, qd_idx, failed_num[2];
-};
-
-/* Flags */
-#define R5_UPTODATE 0 /* page contains current data */
-#define R5_LOCKED 1 /* IO has been submitted on "req" */
-#define R5_OVERWRITE 2 /* towrite covers whole page */
-/* and some that are internal to handle_stripe */
-#define R5_Insync 3 /* rdev && rdev->in_sync at start */
-#define R5_Wantread 4 /* want to schedule a read */
-#define R5_Wantwrite 5
-#define R5_Overlap 7 /* There is a pending overlapping request on this block */
-#define R5_ReadError 8 /* seen a read error here recently */
-#define R5_ReWrite 9 /* have tried to over-write the readerror */
-
-#define R5_Expanded 10 /* This block now has post-expand data */
-#define R5_Wantcompute 11 /* compute_block in progress treat as
- * uptodate
- */
-#define R5_Wantfill 12 /* dev->toread contains a bio that needs
- * filling
- */
-#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
-/*
- * Write method
- */
-#define RECONSTRUCT_WRITE 1
-#define READ_MODIFY_WRITE 2
-/* not a write method, but a compute_parity mode */
-#define CHECK_PARITY 3
-
-/*
- * Stripe state
- */
-#define STRIPE_HANDLE 2
-#define STRIPE_SYNCING 3
-#define STRIPE_INSYNC 4
-#define STRIPE_PREREAD_ACTIVE 5
-#define STRIPE_DELAYED 6
-#define STRIPE_DEGRADED 7
-#define STRIPE_BIT_DELAY 8
-#define STRIPE_EXPANDING 9
-#define STRIPE_EXPAND_SOURCE 10
-#define STRIPE_EXPAND_READY 11
-#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
-#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
-#define STRIPE_BIOFILL_RUN 14
-#define STRIPE_COMPUTE_RUN 15
-/*
- * Operation request flags
- */
-#define STRIPE_OP_BIOFILL 0
-#define STRIPE_OP_COMPUTE_BLK 1
-#define STRIPE_OP_PREXOR 2
-#define STRIPE_OP_BIODRAIN 3
-#define STRIPE_OP_POSTXOR 4
-#define STRIPE_OP_CHECK 5
-
-/*
- * Plugging:
- *
- * To improve write throughput, we need to delay the handling of some
- * stripes until there has been a chance that several write requests
- * for the one stripe have all been collected.
- * In particular, any write request that would require pre-reading
- * is put on a "delayed" queue until there are no stripes currently
- * in a pre-read phase. Further, if the "delayed" queue is empty when
- * a stripe is put on it then we "plug" the queue and do not process it
- * until an unplug call is made. (the unplug_io_fn() is called).
- *
- * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
- * it to the count of prereading stripes.
- * When write is initiated, or the stripe refcnt == 0 (just in case) we
- * clear the PREREAD_ACTIVE flag and decrement the count
- * Whenever the 'handle' queue is empty and the device is not plugged, we
- * move any strips from delayed to handle and clear the DELAYED flag and set
- * PREREAD_ACTIVE.
- * In stripe_handle, if we find pre-reading is necessary, we do it if
- * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
- * HANDLE gets cleared if stripe_handle leaves nothing locked.
- */
-
-
-struct disk_info {
- mdk_rdev_t *rdev;
-};
-
-struct raid5_private_data {
- struct hlist_head *stripe_hashtbl;
- mddev_t *mddev;
- struct disk_info *spare;
- int chunk_size, level, algorithm;
- int max_degraded;
- int raid_disks;
- int max_nr_stripes;
-
- /* used during an expand */
- sector_t expand_progress; /* MaxSector when no expand happening */
- sector_t expand_lo; /* from here up to expand_progress is out-of-bounds
- * as we haven't flushed the metadata yet
- */
- int previous_raid_disks;
-
- struct list_head handle_list; /* stripes needing handling */
- struct list_head hold_list; /* preread ready stripes */
- struct list_head delayed_list; /* stripes that have plugged requests */
- struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
- struct bio *retry_read_aligned; /* currently retrying aligned bios */
- struct bio *retry_read_aligned_list; /* aligned bios retry list */
- atomic_t preread_active_stripes; /* stripes with scheduled io */
- atomic_t active_aligned_reads;
- atomic_t pending_full_writes; /* full write backlog */
- int bypass_count; /* bypassed prereads */
- int bypass_threshold; /* preread nice */
- struct list_head *last_hold; /* detect hold_list promotions */
-
- atomic_t reshape_stripes; /* stripes with pending writes for reshape */
- /* unfortunately we need two cache names as we temporarily have
- * two caches.
- */
- int active_name;
- char cache_name[2][20];
- struct kmem_cache *slab_cache; /* for allocating stripes */
-
- int seq_flush, seq_write;
- int quiesce;
-
- int fullsync; /* set to 1 if a full sync is needed,
- * (fresh device added).
- * Cleared when a sync completes.
- */
-
- struct page *spare_page; /* Used when checking P/Q in raid6 */
-
- /*
- * Free stripes pool
- */
- atomic_t active_stripes;
- struct list_head inactive_list;
- wait_queue_head_t wait_for_stripe;
- wait_queue_head_t wait_for_overlap;
- int inactive_blocked; /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
- int pool_size; /* number of disks in stripeheads in pool */
- spinlock_t device_lock;
- struct disk_info *disks;
-};
-
-typedef struct raid5_private_data raid5_conf_t;
-
-#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
-
-/*
- * Our supported algorithms
- */
-#define ALGORITHM_LEFT_ASYMMETRIC 0
-#define ALGORITHM_RIGHT_ASYMMETRIC 1
-#define ALGORITHM_LEFT_SYMMETRIC 2
-#define ALGORITHM_RIGHT_SYMMETRIC 3
-
-#endif
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 3e120587ead..5a210959e3f 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -1,8 +1,6 @@
#ifndef _XOR_H
#define _XOR_H
-#include <linux/raid/md.h>
-
#define MAX_XOR_BLOCKS 4
extern void xor_blocks(unsigned int count, unsigned int bytes,
diff --git a/include/linux/rational.h b/include/linux/rational.h
new file mode 100644
index 00000000000..4f532fcd9ee
--- /dev/null
+++ b/include/linux/rational.h
@@ -0,0 +1,19 @@
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com>
+ *
+ * helper functions when coping with rational numbers,
+ * e.g. when calculating optimum numerator/denominator pairs for
+ * pll configuration taking into account restricted register size
+ */
+
+#ifndef _LINUX_RATIONAL_H
+#define _LINUX_RATIONAL_H
+
+void rational_best_approximation(
+ unsigned long given_numerator, unsigned long given_denominator,
+ unsigned long max_numerator, unsigned long max_denominator,
+ unsigned long *best_numerator, unsigned long *best_denominator);
+
+#endif /* _LINUX_RATIONAL_H */
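As an illustration (not part of this diff), a clock or PLL driver might use the new helper roughly like this; the wrapper function and the 8-bit register limit are assumptions:

#include <linux/rational.h>

/* Sketch: approximate rate_out/rate_in with an m/n pair that fits
 * hypothetical 8-bit divider registers. */
static void example_pick_pll_divider(unsigned long rate_out,
                                     unsigned long rate_in,
                                     unsigned long *m, unsigned long *n)
{
        rational_best_approximation(rate_out, rate_in, 255, 255, m, n);
        /* *m / *n now best approximates rate_out / rate_in */
}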
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 80044a4f3ab..bfd92e1e5d2 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -36,7 +36,6 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
@@ -108,25 +107,14 @@ struct rcu_data {
struct rcu_head barrier;
};
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
/*
* Increment the quiescent state counter.
* The counter is a bit degenerated: We do not need to know
* how many quiescent states passed, just if there was at least
* one since the start of the grace period. Thus just a flag.
*/
-static inline void rcu_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- rdp->passed_quiesc = 1;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
- rdp->passed_quiesc = 1;
-}
+extern void rcu_qsctr_inc(int cpu);
+extern void rcu_bh_qsctr_inc(int cpu);
extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index e649bd3f2c9..5710f43bbc9 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list,
at->prev = last;
}
+/**
+ * list_entry_rcu - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_entry_rcu(ptr, type, member) \
+ container_of(rcu_dereference(ptr), type, member)
+
+/**
+ * list_first_entry_rcu - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_first_entry_rcu(ptr, type, member) \
+ list_entry_rcu((ptr)->next, type, member)
+
#define __list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
pos != (head); \
@@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member) \
- for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
+ for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
- pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
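For context (not part of this diff), the new accessors are used on the reader side roughly as follows; struct foo, foo_head and foo_dump() are hypothetical:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>

struct foo {
        int value;
        struct list_head list;
};

static LIST_HEAD(foo_head);     /* updated elsewhere with list_add_rcu() */

static void foo_dump(void)
{
        struct foo *p;

        /* the whole traversal must sit inside an RCU read-side section */
        rcu_read_lock();
        list_for_each_entry_rcu(p, &foo_head, list)
                pr_info("value %d\n", p->value);
        rcu_read_unlock();
}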
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 528343e6da5..15fbb3ca634 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -36,7 +36,6 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 74304b4538d..fce522782ff 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -36,34 +36,19 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
+#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
-struct rcu_dyntick_sched {
- int dynticks;
- int dynticks_snap;
- int sched_qs;
- int sched_qs_snap;
- int sched_dynticks_snap;
-};
-
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
-
-static inline void rcu_qsctr_inc(int cpu)
-{
- struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
- rdssp->sched_qs++;
-}
-#define rcu_bh_qsctr_inc(cpu)
+extern void rcu_qsctr_inc(int cpu);
+static inline void rcu_bh_qsctr_inc(int cpu) { }
/*
* Someone might want to pass call_rcu_bh as a function pointer.
* So this needs to just be a rename and not a macro function.
* (no parentheses)
*/
-#define call_rcu_bh call_rcu
+#define call_rcu_bh call_rcu
/**
* call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
struct softirq_action;
#ifdef CONFIG_NO_HZ
-
-static inline void rcu_enter_nohz(void)
-{
- static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
- smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
- __get_cpu_var(rcu_dyntick_sched).dynticks++;
- WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
-}
-
-static inline void rcu_exit_nohz(void)
-{
- static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
- __get_cpu_var(rcu_dyntick_sched).dynticks++;
- smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
- WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
- &rs);
-}
-
-#else /* CONFIG_NO_HZ */
-#define rcu_enter_nohz() do { } while (0)
-#define rcu_exit_nohz() do { } while (0)
-#endif /* CONFIG_NO_HZ */
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+#else
+# define rcu_enter_nohz() do { } while (0)
+# define rcu_exit_nohz() do { } while (0)
+#endif
/*
* A context switch is a grace period for rcupreempt synchronize_rcu()
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index a722fb67bb2..5a5153806c4 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,6 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
@@ -162,9 +161,15 @@ struct rcu_data {
unsigned long offline_fqs; /* Kicked due to being offline. */
unsigned long resched_ipi; /* Sent a resched IPI. */
- /* 5) state to allow this CPU to force_quiescent_state on others */
+ /* 5) __rcu_pending() statistics. */
long n_rcu_pending; /* rcu_pending() calls since boot. */
- long n_rcu_pending_force_qs; /* when to force quiescent states. */
+ long n_rp_qs_pending;
+ long n_rp_cb_ready;
+ long n_rp_cpu_needs_gp;
+ long n_rp_gp_completed;
+ long n_rp_gp_started;
+ long n_rp_need_fqs;
+ long n_rp_need_nothing;
int cpu;
};
@@ -236,30 +241,8 @@ struct rcu_state {
#endif /* #ifdef CONFIG_NO_HZ */
};
-extern struct rcu_state rcu_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_data);
-
-extern struct rcu_state rcu_bh_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-static inline void rcu_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
- rdp->passed_quiesc = 1;
- rdp->passed_quiesc_completed = rdp->completed;
-}
-static inline void rcu_bh_qsctr_inc(int cpu)
-{
- struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
- rdp->passed_quiesc = 1;
- rdp->passed_quiesc_completed = rdp->completed;
-}
+extern void rcu_qsctr_inc(int cpu);
+extern void rcu_bh_qsctr_inc(int cpu);
extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/rds.h b/include/linux/rds.h
new file mode 100644
index 00000000000..d91dc91f544
--- /dev/null
+++ b/include/linux/rds.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2008 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _LINUX_RDS_H
+#define _LINUX_RDS_H
+
+#include <linux/types.h>
+
+/* These sparse annotated types shouldn't be in any user
+ * visible header file. We should clean this up rather
+ * than kludging around them. */
+#ifndef __KERNEL__
+#define __be16 u_int16_t
+#define __be32 u_int32_t
+#define __be64 u_int64_t
+#endif
+
+#define RDS_IB_ABI_VERSION 0x301
+
+/*
+ * setsockopt/getsockopt for SOL_RDS
+ */
+#define RDS_CANCEL_SENT_TO 1
+#define RDS_GET_MR 2
+#define RDS_FREE_MR 3
+/* deprecated: RDS_BARRIER 4 */
+#define RDS_RECVERR 5
+#define RDS_CONG_MONITOR 6
+
+/*
+ * Control message types for SOL_RDS.
+ *
+ * RDS_CMSG_RDMA_ARGS (sendmsg)
+ * Request an RDMA transfer to/from the specified
+ * memory ranges.
+ * The cmsg_data is a struct rds_rdma_args.
+ * RDS_CMSG_RDMA_DEST (recvmsg, sendmsg)
+ * Kernel informs application about intended
+ * source/destination of a RDMA transfer
+ * RDS_CMSG_RDMA_MAP (sendmsg)
+ * Application asks kernel to map the given
+ * memory range into an IB MR, and send the
+ * R_Key along in an RDS extension header.
+ * The cmsg_data is a struct rds_get_mr_args,
+ * the same as for the GET_MR setsockopt.
+ * RDS_CMSG_RDMA_STATUS (recvmsg)
+ * Returns the status of a completed RDMA operation.
+ */
+#define RDS_CMSG_RDMA_ARGS 1
+#define RDS_CMSG_RDMA_DEST 2
+#define RDS_CMSG_RDMA_MAP 3
+#define RDS_CMSG_RDMA_STATUS 4
+#define RDS_CMSG_CONG_UPDATE 5
+
+#define RDS_INFO_FIRST 10000
+#define RDS_INFO_COUNTERS 10000
+#define RDS_INFO_CONNECTIONS 10001
+/* 10002 aka RDS_INFO_FLOWS is deprecated */
+#define RDS_INFO_SEND_MESSAGES 10003
+#define RDS_INFO_RETRANS_MESSAGES 10004
+#define RDS_INFO_RECV_MESSAGES 10005
+#define RDS_INFO_SOCKETS 10006
+#define RDS_INFO_TCP_SOCKETS 10007
+#define RDS_INFO_IB_CONNECTIONS 10008
+#define RDS_INFO_CONNECTION_STATS 10009
+#define RDS_INFO_IWARP_CONNECTIONS 10010
+#define RDS_INFO_LAST 10010
+
+struct rds_info_counter {
+ u_int8_t name[32];
+ u_int64_t value;
+} __attribute__((packed));
+
+#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01
+#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02
+#define RDS_INFO_CONNECTION_FLAG_CONNECTED 0x04
+
+#define TRANSNAMSIZ 16
+
+struct rds_info_connection {
+ u_int64_t next_tx_seq;
+ u_int64_t next_rx_seq;
+ __be32 laddr;
+ __be32 faddr;
+ u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */
+ u_int8_t flags;
+} __attribute__((packed));
+
+struct rds_info_flow {
+ __be32 laddr;
+ __be32 faddr;
+ u_int32_t bytes;
+ __be16 lport;
+ __be16 fport;
+} __attribute__((packed));
+
+#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
+#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
+
+struct rds_info_message {
+ u_int64_t seq;
+ u_int32_t len;
+ __be32 laddr;
+ __be32 faddr;
+ __be16 lport;
+ __be16 fport;
+ u_int8_t flags;
+} __attribute__((packed));
+
+struct rds_info_socket {
+ u_int32_t sndbuf;
+ __be32 bound_addr;
+ __be32 connected_addr;
+ __be16 bound_port;
+ __be16 connected_port;
+ u_int32_t rcvbuf;
+ u_int64_t inum;
+} __attribute__((packed));
+
+#define RDS_IB_GID_LEN 16
+struct rds_info_rdma_connection {
+ __be32 src_addr;
+ __be32 dst_addr;
+ uint8_t src_gid[RDS_IB_GID_LEN];
+ uint8_t dst_gid[RDS_IB_GID_LEN];
+
+ uint32_t max_send_wr;
+ uint32_t max_recv_wr;
+ uint32_t max_send_sge;
+ uint32_t rdma_mr_max;
+ uint32_t rdma_mr_size;
+};
+
+/*
+ * Congestion monitoring.
+ * Congestion control in RDS happens at the host connection
+ * level by exchanging a bitmap marking congested ports.
+ * By default, a process sleeping in poll() is always woken
+ * up when the congestion map is updated.
+ * With explicit monitoring, an application can have more
+ * fine-grained control.
+ * The application installs a 64bit mask value in the socket,
+ * where each bit corresponds to a group of ports.
+ * When a congestion update arrives, RDS checks the set of
+ * ports that are now uncongested against the bit mask
+ * installed in the socket, and if they overlap, we queue a
+ * cong_notification on the socket.
+ *
+ * To install the congestion monitor bitmask, use RDS_CONG_MONITOR
+ * with the 64bit mask.
+ * Congestion updates are received via RDS_CMSG_CONG_UPDATE
+ * control messages.
+ *
+ * The correspondence between bits and ports is
+ * 1 << (portnum % 64)
+ */
+#define RDS_CONG_MONITOR_SIZE 64
+#define RDS_CONG_MONITOR_BIT(port) (((unsigned int) port) % RDS_CONG_MONITOR_SIZE)
+#define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port))
+
+/*
+ * RDMA related types
+ */
+
+/*
+ * This encapsulates a remote memory location.
+ * In the current implementation, it contains the R_Key
+ * of the remote memory region, and the offset into it
+ * (so that the application does not have to worry about
+ * alignment).
+ */
+typedef u_int64_t rds_rdma_cookie_t;
+
+struct rds_iovec {
+ u_int64_t addr;
+ u_int64_t bytes;
+};
+
+struct rds_get_mr_args {
+ struct rds_iovec vec;
+ u_int64_t cookie_addr;
+ uint64_t flags;
+};
+
+struct rds_free_mr_args {
+ rds_rdma_cookie_t cookie;
+ u_int64_t flags;
+};
+
+struct rds_rdma_args {
+ rds_rdma_cookie_t cookie;
+ struct rds_iovec remote_vec;
+ u_int64_t local_vec_addr;
+ u_int64_t nr_local;
+ u_int64_t flags;
+ u_int64_t user_token;
+};
+
+struct rds_rdma_notify {
+ u_int64_t user_token;
+ int32_t status;
+};
+
+#define RDS_RDMA_SUCCESS 0
+#define RDS_RDMA_REMOTE_ERROR 1
+#define RDS_RDMA_CANCELED 2
+#define RDS_RDMA_DROPPED 3
+#define RDS_RDMA_OTHER_ERROR 4
+
+/*
+ * Common set of flags for all RDMA related structs
+ */
+#define RDS_RDMA_READWRITE 0x0001
+#define RDS_RDMA_FENCE 0x0002 /* use FENCE for immediate send */
+#define RDS_RDMA_INVALIDATE 0x0004 /* invalidate R_Key after freeing MR */
+#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */
+#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */
+#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */
+
+#endif /* _LINUX_RDS_H */
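A brief userspace sketch of the congestion-monitor interface described above; the socket, port number and error handling are assumptions, and SOL_RDS comes from the kernel's socket headers:

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* sock is an already-bound RDS socket (hypothetical) */
static void watch_port(int sock, unsigned int port)
{
        uint64_t mask = RDS_CONG_MONITOR_MASK(port);

        if (setsockopt(sock, SOL_RDS, RDS_CONG_MONITOR, &mask, sizeof(mask)) < 0)
                perror("RDS_CONG_MONITOR");
        /* uncongested-port updates then arrive as RDS_CMSG_CONG_UPDATE
         * control messages on recvmsg() */
}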
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h
index e84b0a9feda..a6d014005d4 100644
--- a/include/linux/regulator/bq24022.h
+++ b/include/linux/regulator/bq24022.h
@@ -10,6 +10,8 @@
*
*/
+struct regulator_init_data;
+
/**
* bq24022_mach_info - platform data for bq24022
* @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging
@@ -18,4 +20,5 @@
struct bq24022_mach_info {
int gpio_nce;
int gpio_iset2;
+ struct regulator_init_data *init_data;
};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 801bf77ff4e..277f4b964df 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
*
- * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -88,6 +88,7 @@
* FAIL Regulator output has failed.
* OVER_TEMP Regulator over temp.
* FORCE_DISABLE Regulator shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
*
* NOTE: These events can be OR'ed together when passed into handler.
*/
@@ -98,6 +99,7 @@
#define REGULATOR_EVENT_FAIL 0x08
#define REGULATOR_EVENT_OVER_TEMP 0x10
#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
struct regulator;
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers,
void regulator_bulk_free(int num_consumers,
struct regulator_bulk_data *consumers);
+int regulator_count_voltages(struct regulator *regulator);
+int regulator_list_voltage(struct regulator *regulator, unsigned selector);
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
int regulator_get_voltage(struct regulator *regulator);
int regulator_set_current_limit(struct regulator *regulator,
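To show how the two new consumer-side calls fit together (illustrative only; the "vcc" supply name and foo_show_voltages() are made up):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void foo_show_voltages(struct device *dev)
{
        struct regulator *reg = regulator_get(dev, "vcc");
        int i, n;

        if (IS_ERR(reg))
                return;

        n = regulator_count_voltages(reg);
        for (i = 0; i < n; i++) {
                int uV = regulator_list_voltage(reg, i);

                /* zero or negative values mark unusable selectors */
                if (uV > 0)
                        dev_info(dev, "selector %d -> %d uV\n", i, uV);
        }
        regulator_put(reg);
}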
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 2dae05705f1..225f733e753 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
*
- * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -21,25 +21,39 @@
struct regulator_dev;
struct regulator_init_data;
+enum regulator_status {
+ REGULATOR_STATUS_OFF,
+ REGULATOR_STATUS_ON,
+ REGULATOR_STATUS_ERROR,
+ /* fast/normal/idle/standby are flavors of "on" */
+ REGULATOR_STATUS_FAST,
+ REGULATOR_STATUS_NORMAL,
+ REGULATOR_STATUS_IDLE,
+ REGULATOR_STATUS_STANDBY,
+};
+
/**
* struct regulator_ops - regulator operations.
*
- * This struct describes regulator operations which can be implemented by
- * regulator chip drivers.
- *
- * @enable: Enable the regulator.
- * @disable: Disable the regulator.
+ * @enable: Configure the regulator as enabled.
+ * @disable: Configure the regulator as disabled.
* @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
*
* @set_voltage: Set the voltage for the regulator within the range specified.
* The driver should select the voltage closest to min_uV.
* @get_voltage: Return the currently configured voltage for the regulator.
+ * @list_voltage: Return one of the supported voltages, in microvolts; zero
+ * if the selector indicates a voltage that is unusable on this system;
+ * or negative errno. Selectors range from zero to one less than
+ * regulator_desc.n_voltages. Voltages may be reported in any order.
*
* @set_current_limit: Configure a limit for a current-limited regulator.
- * @get_current_limit: Get the limit for a current-limited regulator.
+ * @get_current_limit: Get the configured limit for a current-limited regulator.
*
- * @set_mode: Set the operating mode for the regulator.
- * @get_mode: Get the current operating mode for the regulator.
+ * @set_mode: Set the configured operating mode for the regulator.
+ * @get_mode: Get the configured operating mode for the regulator.
+ * @get_status: Return actual (not as-configured) status of regulator, as a
+ * REGULATOR_STATUS value (or negative errno)
* @get_optimum_mode: Get the most efficient operating mode for the regulator
* when running with the specified parameters.
*
@@ -51,9 +65,15 @@ struct regulator_init_data;
* suspended.
* @set_suspend_mode: Set the operating mode for the regulator when the
* system is suspended.
+ *
+ * This struct describes regulator operations which can be implemented by
+ * regulator chip drivers.
*/
struct regulator_ops {
+ /* enumerate supported voltages */
+ int (*list_voltage) (struct regulator_dev *, unsigned selector);
+
/* get/set regulator voltage */
int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
int (*get_voltage) (struct regulator_dev *);
@@ -72,6 +92,13 @@ struct regulator_ops {
int (*set_mode) (struct regulator_dev *, unsigned int mode);
unsigned int (*get_mode) (struct regulator_dev *);
+ /* report regulator status ... most other accessors report
+ * control inputs, this reports results of combining inputs
+ * from Linux (and other sources) with the actual load.
+ * returns REGULATOR_STATUS_* or negative errno.
+ */
+ int (*get_status)(struct regulator_dev *);
+
/* get most efficient regulator operating mode for load */
unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
int output_uV, int load_uA);
@@ -106,6 +133,7 @@ enum regulator_type {
*
* @name: Identifying name for the regulator.
* @id: Numerical identifier for the regulator.
+ * @n_voltages: Number of selectors available for ops.list_voltage().
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
* @type: Indicates if the regulator is a voltage or current regulator.
@@ -114,14 +142,48 @@ enum regulator_type {
struct regulator_desc {
const char *name;
int id;
+ unsigned n_voltages;
struct regulator_ops *ops;
int irq;
enum regulator_type type;
struct module *owner;
};
+/*
+ * struct regulator_dev
+ *
+ * Voltage / Current regulator class device. One for each
+ * regulator.
+ *
+ * This should *not* be used directly by anything except the regulator
+ * core and notification injection (which should take the mutex and do
+ * no other direct access).
+ */
+struct regulator_dev {
+ struct regulator_desc *desc;
+ int use_count;
+
+ /* lists we belong to */
+ struct list_head list; /* list of all regulators */
+ struct list_head slist; /* list of supplied regulators */
+
+ /* lists we own */
+ struct list_head consumer_list; /* consumers we supply */
+ struct list_head supply_list; /* regulators we supply */
+
+ struct blocking_notifier_head notifier;
+ struct mutex mutex; /* consumer lock */
+ struct module *owner;
+ struct device dev;
+ struct regulation_constraints *constraints;
+ struct regulator_dev *supply; /* for tree */
+
+ void *reg_data; /* regulator_dev data */
+};
+
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
- struct device *dev, void *driver_data);
+ struct device *dev, struct regulator_init_data *init_data,
+ void *driver_data);
void regulator_unregister(struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
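A minimal driver-side sketch of the extended ops/desc and the new regulator_register() signature; every foo_* name and the pdata/priv pointers are assumptions:

#include <linux/module.h>
#include <linux/regulator/driver.h>

static int foo_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
        /* two fixed selectors, reported in microvolts */
        return selector ? 3300000 : 1800000;
}

static int foo_get_status(struct regulator_dev *rdev)
{
        return REGULATOR_STATUS_NORMAL; /* a real driver would read hardware */
}

static struct regulator_ops foo_ops = {
        .list_voltage = foo_list_voltage,
        .get_status   = foo_get_status,
};

static struct regulator_desc foo_desc = {
        .name       = "foo-ldo",
        .id         = 0,
        .n_voltages = 2,
        .ops        = &foo_ops,
        .type       = REGULATOR_VOLTAGE,
        .owner      = THIS_MODULE,
};

/* in probe(): constraints are now handed over at registration time */
rdev = regulator_register(&foo_desc, &pdev->dev, pdata->init_data, priv);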
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 1387a5d2190..91b4da31f1b 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -14,9 +14,12 @@
#ifndef __REGULATOR_FIXED_H
#define __REGULATOR_FIXED_H
+struct regulator_init_data;
+
struct fixed_voltage_config {
const char *supply_name;
int microvolts;
+ struct regulator_init_data *init_data;
};
#endif
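Board code would attach constraints through the new member roughly like this (names and values are illustrative):

static struct regulator_init_data board_3v3_init_data = {
        .constraints = {
                .always_on = 1,
        },
};

static struct fixed_voltage_config board_3v3_pdata = {
        .supply_name = "board-3v3",
        .microvolts  = 3300000,
        .init_data   = &board_3v3_init_data,
};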
diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h
new file mode 100644
index 00000000000..61401649fe7
--- /dev/null
+++ b/include/linux/regulator/lp3971.h
@@ -0,0 +1,51 @@
+/*
+ * National Semiconductors LP3971 PMIC chip client interface
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Based on wm8400.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_REGULATOR_LP3971_H
+#define __LINUX_REGULATOR_LP3971_H
+
+#include <linux/regulator/machine.h>
+
+#define LP3971_LDO1 0
+#define LP3971_LDO2 1
+#define LP3971_LDO3 2
+#define LP3971_LDO4 3
+#define LP3971_LDO5 4
+
+#define LP3971_DCDC1 5
+#define LP3971_DCDC2 6
+#define LP3971_DCDC3 7
+
+#define LP3971_NUM_REGULATORS 8
+
+struct lp3971_regulator_subdev {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+struct lp3971_platform_data {
+ int num_regulators;
+ struct lp3971_regulator_subdev *regulators;
+};
+
+#endif
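Typical platform wiring for the new header might look as follows; the foo_* names and init data objects are placeholders:

static struct lp3971_regulator_subdev foo_lp3971_regulators[] = {
        {
                .id       = LP3971_DCDC1,
                .initdata = &foo_dcdc1_init_data,   /* struct regulator_init_data */
        },
        {
                .id       = LP3971_LDO2,
                .initdata = &foo_ldo2_init_data,
        },
};

static struct lp3971_platform_data foo_lp3971_pdata = {
        .num_regulators = ARRAY_SIZE(foo_lp3971_regulators),
        .regulators     = foo_lp3971_regulators,
};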
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3794773b23d..bac64fa390f 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
*
- * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -73,7 +73,9 @@ struct regulator_state {
*
* @always_on: Set if the regulator should never be disabled.
* @boot_on: Set if the regulator is enabled when the system is initially
- * started.
+ * started. If the regulator is not enabled by the hardware or
+ * bootloader then it will be enabled when the constraints are
+ * applied.
* @apply_uV: Apply the voltage constraint when initialising.
*
* @input_uV: Input voltage for regulator when supplied by another regulator.
@@ -83,6 +85,7 @@ struct regulator_state {
* @state_standby: State for regulator when system is suspended in standby
* mode.
* @initial_state: Suspend state to set by default.
+ * @initial_mode: Mode to set at startup.
*/
struct regulation_constraints {
@@ -111,6 +114,9 @@ struct regulation_constraints {
struct regulator_state state_standby;
suspend_state_t initial_state; /* suspend state to set at init */
+ /* mode to set on startup */
+ unsigned int initial_mode;
+
 /* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
@@ -160,4 +166,6 @@ struct regulator_init_data {
int regulator_suspend_prepare(suspend_state_t state);
+void regulator_has_full_constraints(void);
+
#endif
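For illustration, a board file could combine the new initial_mode constraint with the full-constraints declaration like this (names and voltages invented):

static struct regulation_constraints foo_core_constraints = {
        .min_uV           = 1100000,
        .max_uV           = 1300000,
        .valid_ops_mask   = REGULATOR_CHANGE_VOLTAGE,
        .valid_modes_mask = REGULATOR_MODE_NORMAL,
        .initial_mode     = REGULATOR_MODE_NORMAL,
        .boot_on          = 1,
};

/* late in board init: every constraint has been declared, so the core
 * may power down any regulator that ends up unused */
regulator_has_full_constraints();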
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
new file mode 100644
index 00000000000..44563192bf1
--- /dev/null
+++ b/include/linux/regulator/max1586.h
@@ -0,0 +1,63 @@
+/*
+ * max1586.h -- Voltage regulation for the Maxim 1586
+ *
+ * Copyright (C) 2008 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef REGULATOR_MAX1586
+#define REGULATOR_MAX1586
+
+#include <linux/regulator/machine.h>
+
+#define MAX1586_V3 0
+#define MAX1586_V6 1
+
+/* precalculated values for v3_gain */
+#define MAX1586_GAIN_NO_R24 1000000 /* 700000 .. 1475000 uV */
+#define MAX1586_GAIN_R24_3k32 1051098 /* 735768 .. 1550369 uV */
+#define MAX1586_GAIN_R24_5k11 1078648 /* 755053 .. 1591005 uV */
+#define MAX1586_GAIN_R24_7k5 1115432 /* 780802 .. 1645262 uV */
+
+/**
+ * max1586_subdev_data - regulator data
+ * @id: regulator Id (either MAX1586_V3 or MAX1586_V6)
+ * @name: regulator cute name (example for V3: "vcc_core")
+ * @platform_data: regulator init data (constraints, supplies, ...)
+ */
+struct max1586_subdev_data {
+ int id;
+ char *name;
+ struct regulator_init_data *platform_data;
+};
+
+/**
+ * max1586_platform_data - platform data for max1586
+ * @num_subdevs: number of regulators used (may be 1 or 2)
+ * @subdevs: regulators used
+ * At most, there will be a regulator for V3 and one for V6 voltages.
+ * @v3_gain: gain on the V3 voltage output multiplied by 1e6.
+ * This can be calculated as ((1 + R24/R25 + R24/185.5kOhm) * 1e6)
+ * for an external resistor configuration as described in the
+ * data sheet (R25=100kOhm).
+ */
+struct max1586_platform_data {
+ int num_subdevs;
+ struct max1586_subdev_data *subdevs;
+ int v3_gain;
+};
+
+#endif
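Continuing the example from the comments above, with R24 = 3.32 kOhm and R25 = 100 kOhm giving the precalculated gain; the foo_* init data is hypothetical:

static struct max1586_subdev_data foo_max1586_subdevs[] = {
        {
                .id            = MAX1586_V3,
                .name          = "vcc_core",
                .platform_data = &foo_vcc_core_init_data,
        },
};

static struct max1586_platform_data foo_max1586_pdata = {
        .num_subdevs = ARRAY_SIZE(foo_max1586_subdevs),
        .subdevs     = foo_max1586_subdevs,
        /* (1 + 3.32k/100k + 3.32k/185.5k) * 1e6 ~= 1051098 */
        .v3_gain     = MAX1586_GAIN_R24_3k32,
};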
diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h
new file mode 100644
index 00000000000..b4554ce9d4b
--- /dev/null
+++ b/include/linux/regulator/userspace-consumer.h
@@ -0,0 +1,25 @@
+#ifndef __REGULATOR_PLATFORM_CONSUMER_H_
+#define __REGULATOR_PLATFORM_CONSUMER_H_
+
+struct regulator_consumer_supply;
+
+/**
+ * struct regulator_userspace_consumer_data - line consumer
+ * initialisation data.
+ *
+ * @name: Name for the consumer line
+ * @num_supplies: Number of supplies feeding the line
+ * @supplies: Supplies configuration.
+ * @init_on: Set if the regulators supplying the line should be
+ * enabled during initialisation
+ */
+struct regulator_userspace_consumer_data {
+ const char *name;
+
+ int num_supplies;
+ struct regulator_bulk_data *supplies;
+
+ bool init_on;
+};
+
+#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */
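A platform might describe such a consumer as follows; the device and supply names are invented:

static struct regulator_bulk_data foo_amp_supplies[] = {
        { .supply = "vaudio" },
};

static struct regulator_userspace_consumer_data foo_amp_consumer = {
        .name         = "audio-amp",
        .num_supplies = ARRAY_SIZE(foo_amp_supplies),
        .supplies     = foo_amp_supplies,
        .init_on      = false,
};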
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index fe00f781a62..8cc65757e47 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -49,13 +49,12 @@ static inline int reiserfs_acl_count(size_t size)
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
struct posix_acl *reiserfs_get_acl(struct inode *inode, int type);
int reiserfs_acl_chmod(struct inode *inode);
-int reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
+int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
+ struct inode *dir, struct dentry *dentry,
struct inode *inode);
int reiserfs_cache_default_acl(struct inode *dir);
-extern int reiserfs_xattr_posix_acl_init(void) __init;
-extern int reiserfs_xattr_posix_acl_exit(void);
-extern struct reiserfs_xattr_handler posix_acl_default_handler;
-extern struct reiserfs_xattr_handler posix_acl_access_handler;
+extern struct xattr_handler reiserfs_posix_acl_default_handler;
+extern struct xattr_handler reiserfs_posix_acl_access_handler;
static inline void reiserfs_init_acl_access(struct inode *inode)
{
@@ -75,23 +74,14 @@ static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
return NULL;
}
-static inline int reiserfs_xattr_posix_acl_init(void)
-{
- return 0;
-}
-
-static inline int reiserfs_xattr_posix_acl_exit(void)
-{
- return 0;
-}
-
static inline int reiserfs_acl_chmod(struct inode *inode)
{
return 0;
}
static inline int
-reiserfs_inherit_default_acl(const struct inode *dir, struct dentry *dentry,
+reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
+ const struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
return 0;
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index bc5114d35e9..dd31e7bae35 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -28,8 +28,6 @@
#include <linux/reiserfs_fs_sb.h>
#endif
-struct fid;
-
/*
* include/linux/reiser_fs.h
*
@@ -37,6 +35,31 @@ struct fid;
*
*/
+/* ioctl's command */
+#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
+/* define following flags to be the same as in ext2, so that chattr(1),
+ lsattr(1) will work with us. */
+#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS
+#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS
+#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION
+#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION
+
+#ifdef __KERNEL__
+/* the 32 bit compat definitions with int argument */
+#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int)
+#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
+#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
+
+/* Locking primitives */
+/* Right now we are still falling back to (un)lock_kernel, but eventually that
+ will evolve into real per-fs locks */
+#define reiserfs_write_lock( sb ) lock_kernel()
+#define reiserfs_write_unlock( sb ) unlock_kernel()
+
+struct fid;
+
/* in reading the #defines, it may help to understand that they employ
the following abbreviations:
@@ -79,15 +102,21 @@ struct fid;
*/
#define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */
-void reiserfs_warning(struct super_block *s, const char *fmt, ...);
+void __reiserfs_warning(struct super_block *s, const char *id,
+ const char *func, const char *fmt, ...);
+#define reiserfs_warning(s, id, fmt, args...) \
+ __reiserfs_warning(s, id, __func__, fmt, ##args)
/* assertions handling */
/** always check a condition and panic if it's false. */
-#define __RASSERT( cond, scond, format, args... ) \
-if( !( cond ) ) \
- reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \
- __FILE__ ":%i:%s: " format "\n", \
- in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args )
+#define __RASSERT(cond, scond, format, args...) \
+do { \
+ if (!(cond)) \
+ reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \
+ __FILE__ ":%i:%s: " format "\n", \
+ in_interrupt() ? -1 : task_pid_nr(current), \
+ __LINE__, __func__ , ##args); \
+} while (0)
#define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
@@ -171,7 +200,11 @@ struct reiserfs_super_block {
__le32 s_flags; /* Right now used only by inode-attributes, if enabled */
unsigned char s_uuid[16]; /* filesystem unique identifier */
unsigned char s_label[16]; /* filesystem volume label */
- char s_unused[88]; /* zero filled by mkreiserfs and
+ __le16 s_mnt_count; /* Count of mounts since last fsck */
+ __le16 s_max_mnt_count; /* Maximum mounts before check */
+ __le32 s_lastcheck; /* Timestamp of last fsck */
+ __le32 s_check_interval; /* Interval between checks */
+ char s_unused[76]; /* zero filled by mkreiserfs and
* reiserfs_convert_objectid_map_v1()
* so any additions must be updated
* there as well. */
@@ -553,10 +586,8 @@ static inline int uniqueness2type(__u32 uniqueness)
return TYPE_DIRECT;
case V1_DIRENTRY_UNIQUENESS:
return TYPE_DIRENTRY;
- default:
- reiserfs_warning(NULL, "vs-500: unknown uniqueness %d",
- uniqueness);
case V1_ANY_UNIQUENESS:
+ default:
return TYPE_ANY;
}
}
@@ -573,9 +604,8 @@ static inline __u32 type2uniqueness(int type)
return V1_DIRECT_UNIQUENESS;
case TYPE_DIRENTRY:
return V1_DIRENTRY_UNIQUENESS;
- default:
- reiserfs_warning(NULL, "vs-501: unknown type %d", type);
case TYPE_ANY:
+ default:
return V1_ANY_UNIQUENESS;
}
}
@@ -630,23 +660,54 @@ static inline void set_le_key_k_type(int version, struct reiserfs_key *key,
cpu_to_le32(type2uniqueness(type)))
: (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type));
}
+
static inline void set_le_ih_k_type(struct item_head *ih, int type)
{
set_le_key_k_type(ih_version(ih), &(ih->ih_key), type);
}
-#define is_direntry_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRENTRY)
-#define is_direct_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRECT)
-#define is_indirect_le_key(version,key) (le_key_k_type (version, key) == TYPE_INDIRECT)
-#define is_statdata_le_key(version,key) (le_key_k_type (version, key) == TYPE_STAT_DATA)
+static inline int is_direntry_le_key(int version, struct reiserfs_key *key)
+{
+ return le_key_k_type(version, key) == TYPE_DIRENTRY;
+}
+
+static inline int is_direct_le_key(int version, struct reiserfs_key *key)
+{
+ return le_key_k_type(version, key) == TYPE_DIRECT;
+}
+
+static inline int is_indirect_le_key(int version, struct reiserfs_key *key)
+{
+ return le_key_k_type(version, key) == TYPE_INDIRECT;
+}
+
+static inline int is_statdata_le_key(int version, struct reiserfs_key *key)
+{
+ return le_key_k_type(version, key) == TYPE_STAT_DATA;
+}
//
// item header has version.
//
-#define is_direntry_le_ih(ih) is_direntry_le_key (ih_version (ih), &((ih)->ih_key))
-#define is_direct_le_ih(ih) is_direct_le_key (ih_version (ih), &((ih)->ih_key))
-#define is_indirect_le_ih(ih) is_indirect_le_key (ih_version(ih), &((ih)->ih_key))
-#define is_statdata_le_ih(ih) is_statdata_le_key (ih_version (ih), &((ih)->ih_key))
+static inline int is_direntry_le_ih(struct item_head *ih)
+{
+ return is_direntry_le_key(ih_version(ih), &ih->ih_key);
+}
+
+static inline int is_direct_le_ih(struct item_head *ih)
+{
+ return is_direct_le_key(ih_version(ih), &ih->ih_key);
+}
+
+static inline int is_indirect_le_ih(struct item_head *ih)
+{
+ return is_indirect_le_key(ih_version(ih), &ih->ih_key);
+}
+
+static inline int is_statdata_le_ih(struct item_head *ih)
+{
+ return is_statdata_le_key(ih_version(ih), &ih->ih_key);
+}
//
// key is pointer to cpu key, result is cpu
@@ -687,9 +748,9 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key)
#define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key)))
#define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key)))
-#define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \
- ( ! COMP_SHORT_KEYS(p_s_ih, p_s_key) && \
- I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) )
+#define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \
+ (!COMP_SHORT_KEYS(ih, key) && \
+ I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize))
/* maximal length of item */
#define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE)
@@ -698,6 +759,7 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key)
/* object identifier for root dir */
#define REISERFS_ROOT_OBJECTID 2
#define REISERFS_ROOT_PARENT_OBJECTID 1
+
extern struct reiserfs_key root_key;
/*
@@ -744,25 +806,25 @@ struct block_head {
#define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */
/* Given the buffer head of a formatted node, resolve to the block head of that node. */
-#define B_BLK_HEAD(p_s_bh) ((struct block_head *)((p_s_bh)->b_data))
+#define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data))
/* Number of items that are in buffer. */
-#define B_NR_ITEMS(p_s_bh) (blkh_nr_item(B_BLK_HEAD(p_s_bh)))
-#define B_LEVEL(p_s_bh) (blkh_level(B_BLK_HEAD(p_s_bh)))
-#define B_FREE_SPACE(p_s_bh) (blkh_free_space(B_BLK_HEAD(p_s_bh)))
+#define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh)))
+#define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh)))
+#define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh)))
-#define PUT_B_NR_ITEMS(p_s_bh,val) do { set_blkh_nr_item(B_BLK_HEAD(p_s_bh),val); } while (0)
-#define PUT_B_LEVEL(p_s_bh,val) do { set_blkh_level(B_BLK_HEAD(p_s_bh),val); } while (0)
-#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
+#define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0)
+#define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0)
+#define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0)
/* Get right delimiting key. -- little endian */
-#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
+#define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh))))
/* Does the buffer contain a disk leaf. */
-#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
+#define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL)
/* Does the buffer contain a disk internal node */
-#define B_IS_KEYS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) > DISK_LEAF_NODE_LEVEL \
- && B_LEVEL(p_s_bh) <= MAX_HEIGHT)
+#define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
+ && B_LEVEL(bh) <= MAX_HEIGHT)
/***************************************************************************/
/* STAT DATA */
@@ -1112,12 +1174,13 @@ struct disk_child {
#define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0)
/* Get disk child by buffer header and position in the tree node. */
-#define B_N_CHILD(p_s_bh,n_pos) ((struct disk_child *)\
-((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos)))
+#define B_N_CHILD(bh, n_pos) ((struct disk_child *)\
+((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos)))
/* Get disk child number by buffer header and position in the tree node. */
-#define B_N_CHILD_NUM(p_s_bh,n_pos) (dc_block_number(B_N_CHILD(p_s_bh,n_pos)))
-#define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) (put_dc_block_number(B_N_CHILD(p_s_bh,n_pos), val ))
+#define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos)))
+#define PUT_B_N_CHILD_NUM(bh, n_pos, val) \
+ (put_dc_block_number(B_N_CHILD(bh, n_pos), val))
/* maximal value of field child_size in structure disk_child */
/* child size is the combined size of all items and their headers */
@@ -1188,33 +1251,33 @@ struct treepath {
struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
/* Get path element by path and path position. */
-#define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset))
+#define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset))
/* Get buffer header at the path by path and path position. */
-#define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer)
+#define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer)
/* Get position in the element at the path by path and path position. */
-#define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position)
+#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
-#define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length))
+#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
/* you know, to the person who didn't
write this the macro name does not
at first suggest what it does.
Maybe POSITION_FROM_PATH_END? Or
maybe we should just focus on
dumping paths... -Hans */
-#define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length))
+#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
-#define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path))
+#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path))
/* in do_balance leaf has h == 0 in contrast with path structure,
where root has level == 0. That is why we need these defines */
-#define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */
+#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */
#define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */
#define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
#define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */
-#define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h))
+#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
#define get_last_bh(path) PATH_PLAST_BUFFER(path)
#define get_ih(path) PATH_PITEM_HEAD(path)
@@ -1444,6 +1507,16 @@ struct buffer_info {
int bi_position;
};
+static inline struct super_block *sb_from_tb(struct tree_balance *tb)
+{
+ return tb ? tb->tb_sb : NULL;
+}
+
+static inline struct super_block *sb_from_bi(struct buffer_info *bi)
+{
+ return bi ? sb_from_tb(bi->tb) : NULL;
+}
+
/* there are 4 types of items: stat data, directory item, indirect, direct.
+-------------------+------------+--------------+------------+
| | k_offset | k_uniqueness | mergeable? |
@@ -1494,7 +1567,7 @@ extern struct item_operations *item_ops[TYPE_ANY + 1];
#define COMP_SHORT_KEYS comp_short_keys
/* number of blocks pointed to by the indirect item */
-#define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE )
+#define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE)
/* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
#define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
@@ -1540,7 +1613,6 @@ struct reiserfs_iget_args {
/* FUNCTION DECLARATIONS */
/***************************************************************************/
-/*#ifdef __KERNEL__*/
#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
#define journal_trans_half(blocksize) \
@@ -1598,6 +1670,10 @@ struct reiserfs_journal_header {
#define JOURNAL_MAX_COMMIT_AGE 30
#define JOURNAL_MAX_TRANS_AGE 30
#define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
+#define JOURNAL_BLOCKS_PER_OBJECT(sb) (JOURNAL_PER_BALANCE_CNT * 3 + \
+ 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \
+ REISERFS_QUOTA_TRANS_BLOCKS(sb)))
+
#ifdef CONFIG_QUOTA
/* We need to update data and inode (atime) */
#define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0)
@@ -1672,7 +1748,7 @@ struct reiserfs_transaction_handle {
int t_refcount;
int t_blocks_logged; /* number of blocks this writer has logged */
int t_blocks_allocated; /* number of blocks this writer allocated */
- unsigned long t_trans_id; /* sanity check, equals the current trans id */
+ unsigned int t_trans_id; /* sanity check, equals the current trans id */
void *t_handle_save; /* save existing current->journal_info */
unsigned displace_new_blocks:1; /* if new block allocation occurres, that block
should be displaced from others */
@@ -1748,13 +1824,13 @@ int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *,
int journal_mark_freed(struct reiserfs_transaction_handle *,
struct super_block *, b_blocknr_t blocknr);
int journal_transaction_should_end(struct reiserfs_transaction_handle *, int);
-int reiserfs_in_journal(struct super_block *p_s_sb, unsigned int bmap_nr,
- int bit_nr, int searchall, b_blocknr_t *next);
+int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr,
+ int bit_nr, int searchall, b_blocknr_t *next);
int journal_begin(struct reiserfs_transaction_handle *,
- struct super_block *p_s_sb, unsigned long);
+ struct super_block *sb, unsigned long);
int journal_join_abort(struct reiserfs_transaction_handle *,
- struct super_block *p_s_sb, unsigned long);
-void reiserfs_journal_abort(struct super_block *sb, int errno);
+ struct super_block *sb, unsigned long);
+void reiserfs_abort_journal(struct super_block *sb, int errno);
void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
int reiserfs_allocate_list_bitmaps(struct super_block *s,
struct reiserfs_list_bitmap *, unsigned int);
@@ -1771,8 +1847,8 @@ int reiserfs_convert_objectid_map_v1(struct super_block *);
/* stree.c */
int B_IS_IN_TREE(const struct buffer_head *);
-extern void copy_item_head(struct item_head *p_v_to,
- const struct item_head *p_v_from);
+extern void copy_item_head(struct item_head *to,
+ const struct item_head *from);
// first key is in cpu form, second - le
extern int comp_short_keys(const struct reiserfs_key *le_key,
@@ -1807,20 +1883,20 @@ static inline void copy_key(struct reiserfs_key *to,
memcpy(to, from, KEY_SIZE);
}
-int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path);
-const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
- const struct super_block *p_s_sb);
+int comp_items(const struct item_head *stored_ih, const struct treepath *path);
+const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
+ const struct super_block *sb);
int search_by_key(struct super_block *, const struct cpu_key *,
struct treepath *, int);
#define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
-int search_for_position_by_key(struct super_block *p_s_sb,
- const struct cpu_key *p_s_cpu_key,
- struct treepath *p_s_search_path);
-extern void decrement_bcount(struct buffer_head *p_s_bh);
-void decrement_counters_in_path(struct treepath *p_s_search_path);
-void pathrelse(struct treepath *p_s_search_path);
+int search_for_position_by_key(struct super_block *sb,
+ const struct cpu_key *cpu_key,
+ struct treepath *search_path);
+extern void decrement_bcount(struct buffer_head *bh);
+void decrement_counters_in_path(struct treepath *search_path);
+void pathrelse(struct treepath *search_path);
int reiserfs_check_path(struct treepath *p);
-void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path);
+void pathrelse_and_restore(struct super_block *s, struct treepath *search_path);
int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
struct treepath *path,
@@ -1843,14 +1919,14 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
struct treepath *path,
const struct cpu_key *key,
- struct inode *inode, struct buffer_head *p_s_un_bh);
+ struct inode *inode, struct buffer_head *un_bh);
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
struct inode *inode, struct reiserfs_key *key);
int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
- struct inode *p_s_inode);
+ struct inode *inode);
int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
- struct inode *p_s_inode, struct page *,
+ struct inode *inode, struct page *,
int update_timestamps);
#define i_block_size(inode) ((inode)->i_sb->s_blocksize)
@@ -1894,10 +1970,12 @@ void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
loff_t offset, int type, int length, int entry_count);
struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key);
+struct reiserfs_security_handle;
int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
struct inode *dir, int mode,
const char *symname, loff_t i_size,
- struct dentry *dentry, struct inode *inode);
+ struct dentry *dentry, struct inode *inode,
+ struct reiserfs_security_handle *security);
void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
struct inode *inode, loff_t size);
@@ -1955,7 +2033,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
#define PROC_INFO_MAX( sb, field, value ) VOID_V
#define PROC_INFO_INC( sb, field ) VOID_V
#define PROC_INFO_ADD( sb, field, val ) VOID_V
-#define PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level ) VOID_V
+#define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V
#endif
/* dir.c */
@@ -1963,6 +2041,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations;
extern const struct inode_operations reiserfs_symlink_inode_operations;
extern const struct inode_operations reiserfs_special_inode_operations;
extern const struct file_operations reiserfs_dir_operations;
+int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *);
/* tail_conversion.c */
int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
@@ -1979,13 +2058,20 @@ extern const struct address_space_operations reiserfs_address_space_operations;
/* fix_nodes.c */
-int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb,
- struct item_head *p_s_ins_ih, const void *);
+int fix_nodes(int n_op_mode, struct tree_balance *tb,
+ struct item_head *ins_ih, const void *);
void unfix_nodes(struct tree_balance *);
/* prints.c */
-void reiserfs_panic(struct super_block *s, const char *fmt, ...)
+void __reiserfs_panic(struct super_block *s, const char *id,
+ const char *function, const char *fmt, ...)
__attribute__ ((noreturn));
+#define reiserfs_panic(s, id, fmt, args...) \
+ __reiserfs_panic(s, id, __func__, fmt, ##args)
+void __reiserfs_error(struct super_block *s, const char *id,
+ const char *function, const char *fmt, ...);
+#define reiserfs_error(s, id, fmt, args...) \
+ __reiserfs_error(s, id, __func__, fmt, ##args)
void reiserfs_info(struct super_block *s, const char *fmt, ...);
void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...);
void print_indirect_item(struct buffer_head *bh, int item_num);
@@ -2022,7 +2108,7 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
int zeros_number);
void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
int pos_in_item, int cut_size);
-void leaf_paste_entries(struct buffer_head *bh, int item_num, int before,
+void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
int new_entry_count, struct reiserfs_de_head *new_dehs,
const char *records, int paste_size);
/* ibalance.c */
@@ -2178,29 +2264,6 @@ long reiserfs_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
int reiserfs_unpack(struct inode *inode, struct file *filp);
-/* ioctl's command */
-#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long)
-/* define following flags to be the same as in ext2, so that chattr(1),
- lsattr(1) will work with us. */
-#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS
-#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS
-#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION
-#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION
-
-/* the 32 bit compat definitions with int argument */
-#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int)
-#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
-#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
-#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION
-#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION
-
-/* Locking primitives */
-/* Right now we are still falling back to (un)lock_kernel, but eventually that
- would evolve into real per-fs locks */
-#define reiserfs_write_lock( sb ) lock_kernel()
-#define reiserfs_write_unlock( sb ) unlock_kernel()
-
-/* xattr stuff */
-#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
+#endif /* __KERNEL__ */
#endif /* _LINUX_REISER_FS_H */
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h
index ce3663fb010..76360b36ac3 100644
--- a/include/linux/reiserfs_fs_i.h
+++ b/include/linux/reiserfs_fs_i.h
@@ -51,7 +51,7 @@ struct reiserfs_inode_info {
/* we use these for fsync or O_SYNC to decide which transaction
** needs to be committed in order for this inode to be properly
** flushed */
- unsigned long i_trans_id;
+ unsigned int i_trans_id;
struct reiserfs_journal_list *i_jl;
struct mutex i_mmap;
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
@@ -59,7 +59,7 @@ struct reiserfs_inode_info {
struct posix_acl *i_acl_default;
#endif
#ifdef CONFIG_REISERFS_FS_XATTR
- struct rw_semaphore xattr_sem;
+ struct rw_semaphore i_xattr_sem;
#endif
struct inode vfs_inode;
};
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index bda6b562a1e..dab68bbed67 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -14,7 +14,7 @@ typedef enum {
} reiserfs_super_block_flags;
/* struct reiserfs_super_block accessors/mutators
- * since this is a disk structure, it will always be in
+ * since this is a disk structure, it will always be in
* little endian format. */
#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
@@ -73,6 +73,9 @@ typedef enum {
#define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version))
#define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v))
+#define sb_mnt_count(sbp) (le16_to_cpu((sbp)->s_mnt_count))
+#define set_sb_mnt_count(sbp, v) ((sbp)->s_mnt_count = cpu_to_le16(v))
+
#define sb_reserved_for_journal(sbp) \
(le16_to_cpu((sbp)->s_v1.s_reserved_for_journal))
#define set_sb_reserved_for_journal(sbp,v) \
@@ -80,16 +83,16 @@ typedef enum {
/* LOGGING -- */
-/* These all interelate for performance.
+/* These all interelate for performance.
**
-** If the journal block count is smaller than n transactions, you lose speed.
+** If the journal block count is smaller than n transactions, you lose speed.
** I don't know what n is yet, I'm guessing 8-16.
**
** typical transaction size depends on the application, how often fsync is
-** called, and how many metadata blocks you dirty in a 30 second period.
+** called, and how many metadata blocks you dirty in a 30 second period.
** The more small files (<16k) you use, the larger your transactions will
** be.
-**
+**
** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
** to prevent wrapping before dirty meta blocks get to disk.
@@ -153,7 +156,7 @@ struct reiserfs_journal_list {
atomic_t j_commit_left;
atomic_t j_older_commits_done; /* all commits older than this on disk */
struct mutex j_commit_mutex;
- unsigned long j_trans_id;
+ unsigned int j_trans_id;
time_t j_timestamp;
struct reiserfs_list_bitmap *j_list_bitmap;
struct buffer_head *j_commit_bh; /* commit buffer head */
@@ -182,7 +185,7 @@ struct reiserfs_journal {
int j_1st_reserved_block; /* first block on s_dev of reserved area journal */
unsigned long j_state;
- unsigned long j_trans_id;
+ unsigned int j_trans_id;
unsigned long j_mount_id;
unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */
unsigned long j_len; /* length of current waiting commit */
@@ -190,7 +193,7 @@ struct reiserfs_journal {
atomic_t j_wcount; /* count of writers for current commit */
unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */
unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */
- unsigned long j_last_flush_trans_id; /* last fully flushed journal timestamp */
+ unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */
struct buffer_head *j_header_bh;
time_t j_trans_start_time; /* time this transaction started */
@@ -223,10 +226,10 @@ struct reiserfs_journal {
int j_num_work_lists; /* number that need attention from kreiserfsd */
/* debugging to make sure things are flushed in order */
- int j_last_flush_id;
+ unsigned int j_last_flush_id;
/* debugging to make sure things are committed in order */
- int j_last_commit_id;
+ unsigned int j_last_commit_id;
struct list_head j_bitmap_nodes;
struct list_head j_dirty_buffers;
@@ -239,7 +242,7 @@ struct reiserfs_journal {
struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
- struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
+ struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
the transactions */
struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
int j_persistent_trans;
@@ -399,10 +402,7 @@ struct reiserfs_sb_info {
int reserved_blocks; /* amount of blocks reserved for further allocations */
spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
struct dentry *priv_root; /* root of /.reiserfs_priv */
-#ifdef CONFIG_REISERFS_FS_XATTR
- struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */
- struct rw_semaphore xattr_dir_sem;
-#endif
+ struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */
int j_errno;
#ifdef CONFIG_QUOTA
char *s_qf_names[MAXQUOTAS];
@@ -426,7 +426,7 @@ enum reiserfs_mount_options {
partition will be dealt with in a
manner of 3.5.x */
-/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
** is not required. If the normal autodection code can't determine which
** hash to use (because both hashes had the same value for a file)
@@ -451,9 +451,9 @@ enum reiserfs_mount_options {
REISERFS_NO_UNHASHED_RELOCATION,
REISERFS_HASHED_RELOCATION,
REISERFS_ATTRS,
- REISERFS_XATTRS,
REISERFS_XATTRS_USER,
REISERFS_POSIXACL,
+ REISERFS_EXPOSE_PRIVROOT,
REISERFS_BARRIER_NONE,
REISERFS_BARRIER_FLUSH,
@@ -489,9 +489,9 @@ enum reiserfs_mount_options {
#define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG))
#define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED))
#define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
-#define reiserfs_xattrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS))
#define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
#define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
+#define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT))
#define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
#define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE))
#define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH))
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index af135ae895d..99928dce37e 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -15,6 +15,12 @@ struct reiserfs_xattr_header {
__le32 h_hash; /* hash of the value */
};
+struct reiserfs_security_handle {
+ char *name;
+ void *value;
+ size_t length;
+};
+
#ifdef __KERNEL__
#include <linux/init.h>
@@ -29,22 +35,15 @@ struct iattr;
struct super_block;
struct nameidata;
-struct reiserfs_xattr_handler {
- char *prefix;
- int (*init) (void);
- void (*exit) (void);
- int (*get) (struct inode * inode, const char *name, void *buffer,
- size_t size);
- int (*set) (struct inode * inode, const char *name, const void *buffer,
- size_t size, int flags);
- int (*del) (struct inode * inode, const char *name);
- int (*list) (struct inode * inode, const char *name, int namelen,
- char *out);
- struct list_head handlers;
-};
+int reiserfs_xattr_register_handlers(void) __init;
+void reiserfs_xattr_unregister_handlers(void);
+int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
+int reiserfs_lookup_privroot(struct super_block *sb);
+int reiserfs_delete_xattrs(struct inode *inode);
+int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
+int reiserfs_permission(struct inode *inode, int mask);
#ifdef CONFIG_REISERFS_FS_XATTR
-#define is_reiserfs_priv_object(inode) IS_PRIVATE(inode)
#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size);
@@ -52,104 +51,94 @@ int reiserfs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
int reiserfs_removexattr(struct dentry *dentry, const char *name);
-int reiserfs_delete_xattrs(struct inode *inode);
-int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
-int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
-int reiserfs_permission(struct inode *inode, int mask);
-int reiserfs_xattr_del(struct inode *, const char *);
-int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t);
+int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
-
-extern struct reiserfs_xattr_handler user_handler;
-extern struct reiserfs_xattr_handler trusted_handler;
-extern struct reiserfs_xattr_handler security_handler;
-
-int reiserfs_xattr_register_handlers(void) __init;
-void reiserfs_xattr_unregister_handlers(void);
-
-static inline void reiserfs_write_lock_xattrs(struct super_block *sb)
+int reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *,
+ struct inode *, const char *, const void *,
+ size_t, int);
+
+extern struct xattr_handler reiserfs_xattr_user_handler;
+extern struct xattr_handler reiserfs_xattr_trusted_handler;
+extern struct xattr_handler reiserfs_xattr_security_handler;
+#ifdef CONFIG_REISERFS_FS_SECURITY
+int reiserfs_security_init(struct inode *dir, struct inode *inode,
+ struct reiserfs_security_handle *sec);
+int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
+ struct reiserfs_security_handle *sec);
+void reiserfs_security_free(struct reiserfs_security_handle *sec);
+#endif
+
+#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
+static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
{
- down_write(&REISERFS_XATTR_DIR_SEM(sb));
-}
-static inline void reiserfs_write_unlock_xattrs(struct super_block *sb)
-{
- up_write(&REISERFS_XATTR_DIR_SEM(sb));
-}
-static inline void reiserfs_read_lock_xattrs(struct super_block *sb)
-{
- down_read(&REISERFS_XATTR_DIR_SEM(sb));
+ loff_t ret = 0;
+ if (reiserfs_file_data_log(inode)) {
+ ret = _ROUND_UP(xattr_size(size), inode->i_sb->s_blocksize);
+ ret >>= inode->i_sb->s_blocksize_bits;
+ }
+ return ret;
}
-static inline void reiserfs_read_unlock_xattrs(struct super_block *sb)
+/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
+ * Let's try to be smart about it.
+ * xattr root: We cache it. If it's not cached, we may need to create it.
+ * xattr dir: If anything has been loaded for this inode, we can set a flag
+ * saying so.
+ * xattr file: Since we don't cache xattrs, we can't tell. We always include
+ * blocks for it.
+ *
+ * However, since root and dir can be created between calls - YOU MUST SAVE
+ * THIS VALUE.
+ */
+static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode)
{
- up_read(&REISERFS_XATTR_DIR_SEM(sb));
-}
+ size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
-static inline void reiserfs_write_lock_xattr_i(struct inode *inode)
-{
- down_write(&REISERFS_I(inode)->xattr_sem);
-}
-static inline void reiserfs_write_unlock_xattr_i(struct inode *inode)
-{
- up_write(&REISERFS_I(inode)->xattr_sem);
-}
-static inline void reiserfs_read_lock_xattr_i(struct inode *inode)
-{
- down_read(&REISERFS_I(inode)->xattr_sem);
-}
-
-static inline void reiserfs_read_unlock_xattr_i(struct inode *inode)
-{
- up_read(&REISERFS_I(inode)->xattr_sem);
-}
+ if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) {
+ nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
+ if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode)
+ nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
+ }
-static inline void reiserfs_mark_inode_private(struct inode *inode)
-{
- inode->i_flags |= S_PRIVATE;
+ return nblocks;
}
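
The "save this value" rule above means the block count computed before the transaction is the one handed to journal_begin() and to the matching end of the transaction. A rough kernel-side fragment, operating on an existing inode and assuming the journal_begin()/journal_end() API declared in reiserfs_fs.h (journal_end() itself is not shown in this hunk), would be:

    	/* Sketch only: reserve the journal blocks once, reuse the saved count. */
    	struct reiserfs_transaction_handle th;
    	size_t jbegin_count = reiserfs_xattr_jcreate_nblocks(inode);
    	int err;

    	reiserfs_write_lock(inode->i_sb);
    	err = journal_begin(&th, inode->i_sb, jbegin_count);
    	if (!err) {
    		/* ... create the xattr root/dir/file as needed ... */
    		err = journal_end(&th, inode->i_sb, jbegin_count);
    	}
    	reiserfs_write_unlock(inode->i_sb);
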
static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
{
- init_rwsem(&REISERFS_I(inode)->xattr_sem);
+ init_rwsem(&REISERFS_I(inode)->i_xattr_sem);
}
#else
-#define is_reiserfs_priv_object(inode) 0
-#define reiserfs_mark_inode_private(inode) do {;} while(0)
#define reiserfs_getxattr NULL
#define reiserfs_setxattr NULL
#define reiserfs_listxattr NULL
#define reiserfs_removexattr NULL
-#define reiserfs_write_lock_xattrs(sb) do {;} while(0)
-#define reiserfs_write_unlock_xattrs(sb) do {;} while(0)
-#define reiserfs_read_lock_xattrs(sb)
-#define reiserfs_read_unlock_xattrs(sb)
-#define reiserfs_permission NULL
-
-#define reiserfs_xattr_register_handlers() 0
-#define reiserfs_xattr_unregister_handlers()
-
-static inline int reiserfs_delete_xattrs(struct inode *inode)
+static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
{
- return 0;
-};
-static inline int reiserfs_chown_xattrs(struct inode *inode,
- struct iattr *attrs)
+}
+#endif /* CONFIG_REISERFS_FS_XATTR */
+
+#ifndef CONFIG_REISERFS_FS_SECURITY
+static inline int reiserfs_security_init(struct inode *dir,
+ struct inode *inode,
+ struct reiserfs_security_handle *sec)
{
return 0;
-};
-static inline int reiserfs_xattr_init(struct super_block *sb, int mount_flags)
+}
+static inline int
+reiserfs_security_write(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
+ struct reiserfs_security_handle *sec)
{
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL); /* to be sure */
return 0;
-};
-static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
-{
}
-#endif /* CONFIG_REISERFS_FS_XATTR */
+static inline void reiserfs_security_free(struct reiserfs_security_handle *sec)
+{}
+#endif
#endif /* __KERNEL__ */
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 4c5bcf6ca7e..511f42fc681 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -49,6 +49,8 @@ struct res_counter {
struct res_counter *parent;
};
+#define RESOURCE_MAX (unsigned long long)LLONG_MAX
+
/**
* Helpers to interact with userspace
* res_counter_read_u64() - returns the value of the specified member.
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 164332cbb77..e73e2429a1b 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -4,6 +4,7 @@
/*
* Copyright (C) 2006 - 2007 Ivo van Doorn
* Copyright (C) 2007 Dmitry Torokhov
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,131 +23,354 @@
*/
#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/leds.h>
+
+/* define userspace visible states */
+#define RFKILL_STATE_SOFT_BLOCKED 0
+#define RFKILL_STATE_UNBLOCKED 1
+#define RFKILL_STATE_HARD_BLOCKED 2
/**
* enum rfkill_type - type of rfkill switch.
- * RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device.
- * RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device.
- * RFKILL_TYPE_UWB: switch is on a ultra wideband device.
- * RFKILL_TYPE_WIMAX: switch is on a WiMAX device.
- * RFKILL_TYPE_WWAN: switch is on a wireless WAN device.
+ *
+ * @RFKILL_TYPE_ALL: toggles all switches (userspace only)
+ * @RFKILL_TYPE_WLAN: switch is on an 802.11 wireless network device.
+ * @RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device.
+ * @RFKILL_TYPE_UWB: switch is on an ultra wideband device.
+ * @RFKILL_TYPE_WIMAX: switch is on a WiMAX device.
+ * @RFKILL_TYPE_WWAN: switch is on a wireless WAN device.
+ * @NUM_RFKILL_TYPES: number of defined rfkill types
*/
enum rfkill_type {
- RFKILL_TYPE_WLAN ,
+ RFKILL_TYPE_ALL = 0,
+ RFKILL_TYPE_WLAN,
RFKILL_TYPE_BLUETOOTH,
RFKILL_TYPE_UWB,
RFKILL_TYPE_WIMAX,
RFKILL_TYPE_WWAN,
- RFKILL_TYPE_MAX,
+ NUM_RFKILL_TYPES,
};
-enum rfkill_state {
- RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */
- RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */
- RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */
- RFKILL_STATE_MAX, /* marker for last valid state */
+/**
+ * enum rfkill_operation - operation types
+ * @RFKILL_OP_ADD: a device was added
+ * @RFKILL_OP_DEL: a device was removed
+ * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device
+ * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all)
+ */
+enum rfkill_operation {
+ RFKILL_OP_ADD = 0,
+ RFKILL_OP_DEL,
+ RFKILL_OP_CHANGE,
+ RFKILL_OP_CHANGE_ALL,
};
-/*
- * These are DEPRECATED, drivers using them should be verified to
- * comply with the rfkill usage guidelines in Documentation/rfkill.txt
- * and then converted to use the new names for rfkill_state
- */
-#define RFKILL_STATE_OFF RFKILL_STATE_SOFT_BLOCKED
-#define RFKILL_STATE_ON RFKILL_STATE_UNBLOCKED
-
-/**
- * struct rfkill - rfkill control structure.
- * @name: Name of the switch.
- * @type: Radio type which the button controls, the value stored
- * here should be a value from enum rfkill_type.
- * @state: State of the switch, "UNBLOCKED" means radio can operate.
- * @user_claim_unsupported: Whether the hardware supports exclusive
- * RF-kill control by userspace. Set this before registering.
- * @user_claim: Set when the switch is controlled exlusively by userspace.
- * @mutex: Guards switch state transitions. It serializes callbacks
- * and also protects the state.
- * @data: Pointer to the RF button drivers private data which will be
- * passed along when toggling radio state.
- * @toggle_radio(): Mandatory handler to control state of the radio.
- * only RFKILL_STATE_SOFT_BLOCKED and RFKILL_STATE_UNBLOCKED are
- * valid parameters.
- * @get_state(): handler to read current radio state from hardware,
- * may be called from atomic context, should return 0 on success.
- * Either this handler OR judicious use of rfkill_force_state() is
- * MANDATORY for any driver capable of RFKILL_STATE_HARD_BLOCKED.
- * @led_trigger: A LED trigger for this button's LED.
- * @dev: Device structure integrating the switch into device tree.
- * @node: Used to place switch into list of all switches known to the
- * the system.
- *
- * This structure represents a RF switch located on a network device.
- */
-struct rfkill {
- const char *name;
- enum rfkill_type type;
-
- bool user_claim_unsupported;
- bool user_claim;
-
- /* the mutex serializes callbacks and also protects
- * the state */
- struct mutex mutex;
- enum rfkill_state state;
- void *data;
- int (*toggle_radio)(void *data, enum rfkill_state state);
- int (*get_state)(void *data, enum rfkill_state *state);
+/**
+ * struct rfkill_event - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * carrying events from the kernel and control requests to the kernel.
+ */
+struct rfkill_event {
+ __u32 idx;
+ __u8 type;
+ __u8 op;
+ __u8 soft, hard;
+} __packed;
-#ifdef CONFIG_RFKILL_LEDS
- struct led_trigger led_trigger;
-#endif
+/* ioctl for turning off rfkill-input (if present) */
+#define RFKILL_IOC_MAGIC 'R'
+#define RFKILL_IOC_NOINPUT 1
+#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
- struct device dev;
- struct list_head node;
- enum rfkill_state state_for_resume;
+/* and that's all userspace gets */
+#ifdef __KERNEL__
+/* don't allow anyone to use these in the kernel */
+enum rfkill_user_states {
+ RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED,
+ RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED,
+ RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED,
};
-#define to_rfkill(d) container_of(d, struct rfkill, dev)
+#undef RFKILL_STATE_SOFT_BLOCKED
+#undef RFKILL_STATE_UNBLOCKED
+#undef RFKILL_STATE_HARD_BLOCKED
-struct rfkill * __must_check rfkill_allocate(struct device *parent,
- enum rfkill_type type);
-void rfkill_free(struct rfkill *rfkill);
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+
+/* this is opaque */
+struct rfkill;
+
+/**
+ * struct rfkill_ops - rfkill driver methods
+ *
+ * @poll: poll the rfkill block state(s) -- only assign this method
+ * when you need polling. When called, simply call one of the
+ * rfkill_set{,_hw,_sw}_state family of functions. If the hw
+ * is getting unblocked you need to take into account the return
+ * value of those functions to make sure the software block is
+ * properly used.
+ * @query: query the rfkill block state(s) and call exactly one of the
+ * rfkill_set{,_hw,_sw}_state family of functions. Assign this
+ * method if input events can cause hardware state changes to make
+ * the rfkill core query your driver before setting a requested
+ * block.
+ * @set_block: turn the transmitter on (blocked == false) or off
+ * (blocked == true) -- ignore and return 0 when hard blocked.
+ * This callback must be assigned.
+ */
+struct rfkill_ops {
+ void (*poll)(struct rfkill *rfkill, void *data);
+ void (*query)(struct rfkill *rfkill, void *data);
+ int (*set_block)(void *data, bool blocked);
+};
+
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+/**
+ * rfkill_alloc - allocate rfkill structure
+ * @name: name of the struct -- the string is not copied internally
+ * @parent: device that has rf switch on it
+ * @type: type of the switch (RFKILL_TYPE_*)
+ * @ops: rfkill methods
+ * @ops_data: data passed to each method
+ *
+ * This function should be called by the transmitter driver to allocate an
+ * rfkill structure. Returns %NULL on failure.
+ */
+struct rfkill * __must_check rfkill_alloc(const char *name,
+ struct device *parent,
+ const enum rfkill_type type,
+ const struct rfkill_ops *ops,
+ void *ops_data);
+
+/**
+ * rfkill_register - Register a rfkill structure.
+ * @rfkill: rfkill structure to be registered
+ *
+ * This function should be called by the transmitter driver to register
+ * the rfkill structure. Before calling this function the driver needs
+ * to be ready to service method calls from rfkill.
+ *
+ * If rfkill_init_sw_state() is not called before registration,
+ * set_block() will be called to initialize the software blocked state
+ * to a default value.
+ *
+ * If the hardware blocked state is not set before registration,
+ * it is assumed to be unblocked.
+ */
int __must_check rfkill_register(struct rfkill *rfkill);
+
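
Putting rfkill_alloc(), the ops table and rfkill_register() together, a minimal driver-side sketch might look as follows; the example_* names, the private structure and the WLAN type are assumptions for illustration only, and the actual hardware programming in set_block is omitted.

    /* Sketch of a driver wiring up an rfkill switch. */
    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/rfkill.h>

    struct example_priv {
    	struct rfkill *rfkill;
    	/* ... device state ... */
    };

    static int example_set_block(void *data, bool blocked)
    {
    	struct example_priv *priv = data;

    	/* Program the hardware: transmitter off when blocked is true.
    	 * The device-specific register access is omitted from this sketch. */
    	(void)priv;
    	return 0;
    }

    static const struct rfkill_ops example_rfkill_ops = {
    	.set_block = example_set_block,
    };

    static int example_register_rfkill(struct device *dev, struct example_priv *priv)
    {
    	int err;

    	priv->rfkill = rfkill_alloc("example-wlan", dev, RFKILL_TYPE_WLAN,
    				    &example_rfkill_ops, priv);
    	if (!priv->rfkill)
    		return -ENOMEM;

    	err = rfkill_register(priv->rfkill);
    	if (err)
    		rfkill_destroy(priv->rfkill);
    	return err;
    }
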
+/**
+ * rfkill_pause_polling(struct rfkill *rfkill)
+ *
+ * Pause polling -- say transmitter is off for other reasons.
+ * NOTE: not necessary for suspend/resume -- in that case the
+ * core stops polling anyway
+ */
+void rfkill_pause_polling(struct rfkill *rfkill);
+
+/**
+ * rfkill_resume_polling(struct rfkill *rfkill)
+ *
+ * Resume polling -- say the transmitter is available again.
+ * NOTE: not necessary for suspend/resume -- in that case the
+ * core stops polling anyway
+ */
+void rfkill_resume_polling(struct rfkill *rfkill);
+
+
+/**
+ * rfkill_unregister - Unregister a rfkill structure.
+ * @rfkill: rfkill structure to be unregistered
+ *
+ * This function should be called by the network driver during device
+ * teardown to destroy the rfkill structure. Until it returns, the driver
+ * needs to be able to service method calls.
+ */
void rfkill_unregister(struct rfkill *rfkill);
-int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state);
-int rfkill_set_default(enum rfkill_type type, enum rfkill_state state);
+/**
+ * rfkill_destroy - free rfkill structure
+ * @rfkill: rfkill structure to be destroyed
+ *
+ * Destroys the rfkill structure.
+ */
+void rfkill_destroy(struct rfkill *rfkill);
/**
- * rfkill_state_complement - return complementar state
- * @state: state to return the complement of
+ * rfkill_set_hw_state - Set the internal rfkill hardware block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current hardware block state to set
+ *
+ * rfkill drivers that get events when the hard-blocked state changes
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of the current state. They should also use this after
+ * resume if the state could have changed.
*
- * Returns RFKILL_STATE_SOFT_BLOCKED if @state is RFKILL_STATE_UNBLOCKED,
- * returns RFKILL_STATE_UNBLOCKED otherwise.
+ * You need not (but may) call this function if poll_state is assigned.
+ *
+ * This function can be called in any context, even from within rfkill
+ * callbacks.
+ *
+ * The function returns the combined block state (true if transmitter
+ * should be blocked) so that drivers need not keep track of the soft
+ * block state -- which they might not be able to.
*/
-static inline enum rfkill_state rfkill_state_complement(enum rfkill_state state)
+bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
+
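
Continuing the example_priv sketch above, a driver that gets an interrupt when the hardware kill switch flips would typically report it like this; example_read_killswitch() is an assumed helper, and the reaction to the combined state is device specific:

    #include <linux/interrupt.h>
    #include <linux/rfkill.h>

    static irqreturn_t example_killswitch_irq(int irq, void *data)
    {
    	struct example_priv *priv = data;
    	bool hw_blocked = example_read_killswitch(priv);	/* assumed helper */

    	/* rfkill_set_hw_state() returns true if the transmitter must be off. */
    	if (rfkill_set_hw_state(priv->rfkill, hw_blocked)) {
    		/* Cut TX power here (device specific, omitted). */
    	}

    	return IRQ_HANDLED;
    }
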
+/**
+ * rfkill_set_sw_state - Set the internal rfkill software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current software block state to set
+ *
+ * rfkill drivers that get events when the soft-blocked state changes
+ * (yes, some platforms directly act on input but allow changing again)
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of the current state.
+ *
+ * Drivers should also call this function after resume if the state has
+ * been changed by the user. This only makes sense for "persistent"
+ * devices (see rfkill_init_sw_state()).
+ *
+ * This function can be called in any context, even from within rfkill
+ * callbacks.
+ *
+ * The function returns the combined block state (true if transmitter
+ * should be blocked).
+ */
+bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
+ * rfkill_init_sw_state - Initialize persistent software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current software block state to set
+ *
+ * rfkill drivers that preserve their software block state over power off
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of their initial state. It should only be used before
+ * registration.
+ *
+ * In addition, it marks the device as "persistent", an attribute which
+ * can be read by userspace. Persistent devices are expected to preserve
+ * their own state when suspended.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
+ * rfkill_set_states - Set the internal rfkill block states
+ * @rfkill: pointer to the rfkill class to modify.
+ * @sw: the current software block state to set
+ * @hw: the current hardware block state to set
+ *
+ * This function can be called in any context, even from within rfkill
+ * callbacks.
+ */
+void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
+
+/**
+ * rfkill_blocked - query rfkill block
+ *
+ * @rfkill: rfkill struct to query
+ */
+bool rfkill_blocked(struct rfkill *rfkill);
+#else /* !RFKILL */
+static inline struct rfkill * __must_check
+rfkill_alloc(const char *name,
+ struct device *parent,
+ const enum rfkill_type type,
+ const struct rfkill_ops *ops,
+ void *ops_data)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int __must_check rfkill_register(struct rfkill *rfkill)
+{
+ if (rfkill == ERR_PTR(-ENODEV))
+ return 0;
+ return -EINVAL;
+}
+
+static inline void rfkill_pause_polling(struct rfkill *rfkill)
+{
+}
+
+static inline void rfkill_resume_polling(struct rfkill *rfkill)
+{
+}
+
+static inline void rfkill_unregister(struct rfkill *rfkill)
+{
+}
+
+static inline void rfkill_destroy(struct rfkill *rfkill)
{
- return (state == RFKILL_STATE_UNBLOCKED) ?
- RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
}
+static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+{
+ return blocked;
+}
+
+static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ return blocked;
+}
+
+static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+}
+
+static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
+{
+}
+
+static inline bool rfkill_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+#endif /* RFKILL || RFKILL_MODULE */
+
+
+#ifdef CONFIG_RFKILL_LEDS
/**
- * rfkill_get_led_name - Get the LED trigger name for the button's LED.
+ * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
* This function might return a NULL pointer if registering of the
- * LED trigger failed.
- * Use this as "default_trigger" for the LED.
+ * LED trigger failed. Use this as "default_trigger" for the LED.
*/
-static inline char *rfkill_get_led_name(struct rfkill *rfkill)
-{
-#ifdef CONFIG_RFKILL_LEDS
- return (char *)(rfkill->led_trigger.name);
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_led_trigger_name -- set the LED trigger name
+ * @rfkill: rfkill struct
+ * @name: LED trigger name
+ *
+ * This function sets the LED trigger name of the radio LED
+ * trigger that rfkill creates. It is optional, but if called
+ * must be called before rfkill_register() to be effective.
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
#else
+static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
return NULL;
-#endif
}
+static inline void
+rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+}
+#endif
+
+#endif /* __KERNEL__ */
+
#endif /* RFKILL_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b35966008..29f8599e6be 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H
+#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
@@ -8,20 +9,27 @@ struct ring_buffer;
struct ring_buffer_iter;
/*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
- u32 type:2, len:3, time_delta:27;
+ kmemcheck_bitfield_begin(bitfield);
+ u32 type_len:5, time_delta:27;
+ kmemcheck_bitfield_end(bitfield);
+
u32 array[];
};
/**
* enum ring_buffer_type - internal ring buffer types
*
- * @RINGBUF_TYPE_PADDING: Left over page padding
- * array is ignored
- * size is variable depending on how much
+ * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
+ * If time_delta is 0:
+ * array is ignored
+ * size is variable depending on how much
* padding is needed
+ * If time_delta is non-zero:
+ * array[0] holds the actual length
+ * size = 4 + length (bytes)
*
* @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
* array[0] = time delta (28 .. 59)
@@ -32,22 +40,23 @@ struct ring_buffer_event {
* array[1..2] = tv_sec
* size = 16 bytes
*
- * @RINGBUF_TYPE_DATA: Data record
- * If len is zero:
+ * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
+ * Data record
+ * If type_len is zero:
* array[0] holds the actual length
* array[1..(length+3)/4] holds data
- * size = 4 + 4 + length (bytes)
+ * size = 4 + length (bytes)
* else
- * length = len << 2
+ * length = type_len << 2
* array[0..(length+3)/4-1] holds data
* size = 4 + length (bytes)
*/
enum ring_buffer_type {
+ RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
/* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
RINGBUF_TYPE_TIME_STAMP,
- RINGBUF_TYPE_DATA,
};
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
@@ -66,21 +75,61 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
}
/*
+ * ring_buffer_event_discard can discard any event in the ring buffer.
+ * It is up to the caller to protect against a reader consuming it
+ * or a writer wrapping and replacing it.
+ *
+ * No external protection is needed if this is called before
+ * the event is committed. But in that case it would be better to
+ * use ring_buffer_discard_commit.
+ *
+ * Note, if an event that has not been committed is discarded
+ * with ring_buffer_event_discard, it must still be committed.
+ */
+void ring_buffer_event_discard(struct ring_buffer_event *event);
+
+/*
+ * ring_buffer_discard_commit will remove an event that has not
+ * been committed yet. If this is used, then ring_buffer_unlock_commit
+ * must not be called on the discarded event. This function
+ * will try to remove the event from the ring buffer completely
+ * if another event has not been written after it.
+ *
+ * Example use:
+ *
+ * if (some_condition)
+ * ring_buffer_discard_commit(buffer, event);
+ * else
+ * ring_buffer_unlock_commit(buffer, event);
+ */
+void ring_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
+/*
* size is in bytes for each per CPU buffer.
*/
struct ring_buffer *
-ring_buffer_alloc(unsigned long size, unsigned flags);
+__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc(size, flags) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc((size), (flags), &__key); \
+})
+
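
With the new signatures, the reserve/commit cycle no longer passes irq flags around. A short kernel-side fragment writing one record (the buffer size and payload are arbitrary, and ring_buffer_event_data() is assumed from the rest of this header rather than shown in this hunk) looks like:

    	/* Sketch: allocate a per-CPU ring buffer and write one small record. */
    	struct ring_buffer *buffer;
    	struct ring_buffer_event *event;
    	u32 payload = 42;

    	buffer = ring_buffer_alloc(4096, RB_FL_OVERWRITE);
    	if (buffer) {
    		event = ring_buffer_lock_reserve(buffer, sizeof(payload));
    		if (event) {
    			memcpy(ring_buffer_event_data(event), &payload,
    			       sizeof(payload));
    			ring_buffer_unlock_commit(buffer, event);
    		}
    		ring_buffer_free(buffer);
    	}
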
void ring_buffer_free(struct ring_buffer *buffer);
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
- unsigned long length,
- unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+ unsigned long length);
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags);
+ struct ring_buffer_event *event);
int ring_buffer_write(struct ring_buffer *buffer,
unsigned long length, void *data);
@@ -120,18 +169,27 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+ int cpu, u64 *ts);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+ u64 (*clock)(void));
-u64 ring_buffer_time_stamp(int cpu);
-void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
+size_t ring_buffer_page_len(void *page);
-void tracing_on(void);
-void tracing_off(void);
-void tracing_off_permanent(void);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer,
- void **data_page, int cpu, int full);
+int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+ size_t len, int cpu, int full);
+
+struct trace_seq;
+
+int ring_buffer_print_entry_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_seq *s);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b35bc0e19cd..216d024f830 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,7 +83,8 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
+int page_referenced(struct page *, int is_locked,
+ struct mem_cgroup *cnt, unsigned long *vm_flags);
int try_to_unmap(struct page *, int ignore_refs);
/*
@@ -105,18 +106,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*/
int page_mkclean(struct page *);
-#ifdef CONFIG_UNEVICTABLE_LRU
/*
* called in munlock()/munmap() path to check for other vmas holding
* the page mlocked.
*/
int try_to_munlock(struct page *);
-#else
-static inline int try_to_munlock(struct page *page)
-{
- return 0; /* a.k.a. SWAP_SUCCESS */
-}
-#endif
#else /* !CONFIG_MMU */
@@ -124,7 +118,7 @@ static inline int try_to_munlock(struct page *page)
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
+#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL
static inline int page_mkclean(struct page *page)
diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h
index e20bbf9eb36..c490fbc43fe 100644
--- a/include/linux/romfs_fs.h
+++ b/include/linux/romfs_fs.h
@@ -53,9 +53,4 @@ struct romfs_inode {
#define ROMFH_PAD (ROMFH_SIZE-1)
#define ROMFH_MASK (~ROMFH_PAD)
-#ifdef __KERNEL__
-
-/* Not much now */
-
-#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h
new file mode 100644
index 00000000000..215278b8df2
--- /dev/null
+++ b/include/linux/rotary_encoder.h
@@ -0,0 +1,15 @@
+#ifndef __ROTARY_ENCODER_H__
+#define __ROTARY_ENCODER_H__
+
+struct rotary_encoder_platform_data {
+ unsigned int steps;
+ unsigned int axis;
+ unsigned int gpio_a;
+ unsigned int gpio_b;
+ unsigned int inverted_a;
+ unsigned int inverted_b;
+ bool relative_axis;
+ bool rollover;
+};
+
+#endif /* __ROTARY_ENCODER_H__ */
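A minimal board-file sketch showing how this platform data is meant to be attached; the GPIO numbers are invented and the "rotary-encoder" device name is assumed from the matching input driver:

	#include <linux/input.h>
	#include <linux/platform_device.h>
	#include <linux/rotary_encoder.h>

	static struct rotary_encoder_platform_data example_rotary_pdata = {
		.steps		= 24,		/* detents per full turn */
		.axis		= ABS_X,	/* reported as an absolute axis */
		.gpio_a		= 73,		/* hypothetical GPIO numbers */
		.gpio_b		= 74,
		.inverted_a	= 0,
		.inverted_b	= 0,
		.relative_axis	= false,
		.rollover	= false,
	};

	static struct platform_device example_rotary_device = {
		.name	= "rotary-encoder",
		.id	= 0,
		.dev	= {
			.platform_data = &example_rotary_pdata,
		},
	};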
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h
index bf74e63c98f..8ba646e610d 100644
--- a/include/linux/rtc-v3020.h
+++ b/include/linux/rtc-v3020.h
@@ -14,6 +14,12 @@
* is used depends on the board. */
struct v3020_platform_data {
int leftshift; /* (1<<(leftshift)) & readl() */
+
+ int use_gpio:1;
+ unsigned int gpio_cs;
+ unsigned int gpio_wr;
+ unsigned int gpio_rd;
+ unsigned int gpio_io;
};
#define V3020_STATUS_0 0x00
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4046b75563c..60f88a7fb13 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -99,6 +99,7 @@ struct rtc_pll_info {
#ifdef __KERNEL__
+#include <linux/types.h>
#include <linux/interrupt.h>
extern int rtc_month_days(unsigned int month, unsigned int year);
@@ -232,6 +233,11 @@ int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
+static inline bool is_leap_year(unsigned int year)
+{
+ return (!(year % 4) && (year % 100)) || !(year % 400);
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_RTC_H_ */
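For reference, a short sketch relating the new is_leap_year() helper to rtc_month_days() declared above (month is 0-based, so 1 means February); illustrative only:

	#include <linux/kernel.h>
	#include <linux/rtc.h>

	static void example_leap_check(void)
	{
		int feb_2012 = rtc_month_days(1, 2012);	/* 29: 2012 is a leap year */
		bool leap_1900 = is_leap_year(1900);	/* false: divisible by 100 but not 400 */

		pr_info("Feb 2012: %d days, 1900 leap: %d\n", feb_2012, leap_1900);
	}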
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 1e5f6730ff3..ba3254ecf7f 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -217,6 +217,7 @@ enum
#define RTPROT_DNROUTED 13 /* DECnet routing daemon */
#define RTPROT_XORP 14 /* XORP */
#define RTPROT_NTK 15 /* Netsukuku */
+#define RTPROT_DHCP 16 /* DHCP client */
/* rtm_scope
@@ -622,8 +623,8 @@ static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
-extern int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
- struct nlmsghdr *nlh, gfp_t flags);
+extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+ u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 011db2f4c94..4d075426988 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
-#include <linux/fs_struct.h>
+#include <linux/path.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
@@ -77,6 +77,7 @@ struct sched_param {
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
+#include <linux/rculist.h>
#include <linux/rtmutex.h>
#include <linux/time.h>
@@ -91,12 +92,13 @@ struct sched_param {
#include <asm/processor.h>
-struct mem_cgroup;
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
-struct bts_tracer;
+struct fs_struct;
+struct bts_context;
+struct perf_counter_context;
/*
* List of flags we want to share for kernel threads,
@@ -115,6 +117,7 @@ struct bts_tracer;
* 11 bit fractions.
*/
extern unsigned long avenrun[]; /* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
@@ -134,8 +137,11 @@ DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
+
+extern unsigned long get_parent_ip(unsigned long addr);
struct seq_file;
struct cfs_rq;
@@ -202,7 +208,8 @@ extern unsigned long long time_sync_thresh;
#define task_is_stopped_or_traced(task) \
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
- ((task->state & TASK_UNINTERRUPTIBLE) != 0)
+ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+ (task->flags & PF_FROZEN) == 0)
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
@@ -253,6 +260,7 @@ extern void task_rq_unlock_wait(struct task_struct *p);
extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
+extern int get_nohz_load_balancer(void);
#else
static inline int select_nohz_load_balancer(int cpu)
{
@@ -297,17 +305,11 @@ extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
struct file *filp, void __user *buffer,
size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
-static inline void spawn_softlockup_task(void)
-{
-}
static inline void touch_softlockup_watchdog(void)
{
}
@@ -316,6 +318,15 @@ static inline void touch_all_softlockup_watchdogs(void)
}
#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+extern unsigned int sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+#endif
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))
@@ -331,7 +342,9 @@ extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
struct nsproxy;
struct user_namespace;
@@ -389,8 +402,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
(mm)->hiwater_vm = (mm)->total_vm; \
} while (0)
-#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm))
-#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm)
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+ return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+ return max(mm->hiwater_vm, mm->total_vm);
+}
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
@@ -538,25 +558,8 @@ struct signal_struct {
struct list_head cpu_timers[3];
- /* job control IDs */
-
- /*
- * pgrp and session fields are deprecated.
- * use the task_session_Xnr and task_pgrp_Xnr routines below
- */
-
- union {
- pid_t pgrp __deprecated;
- pid_t __pgrp;
- };
-
struct pid *tty_old_pgrp;
- union {
- pid_t session __deprecated;
- pid_t __session;
- };
-
/* boolean value for session group leader */
int leader;
@@ -670,9 +673,13 @@ struct user_struct {
struct task_group *tg;
#ifdef CONFIG_SYSFS
struct kobject kobj;
- struct work_struct work;
+ struct delayed_work work;
#endif
#endif
+
+#ifdef CONFIG_PERF_COUNTERS
+ atomic_long_t locked_vm;
+#endif
};
extern int uids_sysfs_init(void);
@@ -839,7 +846,17 @@ struct sched_group {
*/
u32 reciprocal_cpu_power;
- unsigned long cpumask[];
+ /*
+ * The CPUs this group covers.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ *
+ * It can also be embedded into static data structures at build
+ * time. (See 'struct static_sched_group' in kernel/sched.c)
+ */
+ unsigned long cpumask[0];
};
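A minimal sketch of the allocation pattern the new comment describes for the zero-length cpumask[] member, i.e. reserving cpumask_size() bytes right behind the structure (assumes an SMP build; not the scheduler's actual code):

	#include <linux/cpumask.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	static struct sched_group *example_alloc_sched_group(void)
	{
		struct sched_group *sg;

		/* one allocation: the struct plus storage for cpumask[] */
		sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
		if (!sg)
			return NULL;

		cpumask_copy(sched_group_cpus(sg), cpu_online_mask);
		return sg;
	}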
static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
@@ -925,8 +942,17 @@ struct sched_domain {
char *name;
#endif
- /* span of all CPUs in this domain */
- unsigned long span[];
+ /*
+ * Span of all CPUs in this domain.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ *
+ * It can also be embedded into static data structures at build
+ * time. (See 'struct static_sched_domain' in kernel/sched.c)
+ */
+ unsigned long span[0];
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
@@ -998,6 +1024,7 @@ struct sched_class {
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+ int (*needs_post_schedule) (struct rq *this_rq);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
@@ -1052,6 +1079,11 @@ struct sched_entity {
u64 last_wakeup;
u64 avg_overlap;
+ u64 nr_migrations;
+
+ u64 start_runtime;
+ u64 avg_wakeup;
+
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
@@ -1067,7 +1099,6 @@ struct sched_entity {
u64 exec_max;
u64 slice_max;
- u64 nr_migrations;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
@@ -1146,7 +1177,6 @@ struct task_struct {
* a short time
*/
unsigned char fpu_counter;
- s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
@@ -1164,6 +1194,7 @@ struct task_struct {
#endif
struct list_head tasks;
+ struct plist_node pushable_tasks;
struct mm_struct *mm, *active_mm;
@@ -1175,13 +1206,14 @@ struct task_struct {
/* ??? */
unsigned int personality;
unsigned did_exec:1;
+ unsigned in_execve:1; /* Tell the LSMs that the process is doing an
+ * execve */
pid_t pid;
pid_t tgid;
-#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
-#endif
+
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
@@ -1204,18 +1236,11 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
-#ifdef CONFIG_X86_PTRACE_BTS
/*
* This is the tracer handle for the ptrace BTS extension.
* This field actually belongs to the ptracer task.
*/
- struct bts_tracer *bts;
- /*
- * The buffer to hold the BTS data.
- */
- void *bts_buffer;
- size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+ struct bts_context *bts;
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
@@ -1242,7 +1267,9 @@ struct task_struct {
* credentials (COW) */
const struct cred *cred; /* effective (overridable) subjective task
* credentials (COW) */
- struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
+ struct mutex cred_guard_mutex; /* guard against foreign influences on
+ * credential calculations
+ * (notably ptrace) */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
@@ -1254,9 +1281,8 @@ struct task_struct {
/* ipc stuff */
struct sysv_sem sysvsem;
#endif
-#ifdef CONFIG_DETECT_SOFTLOCKUP
+#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
- unsigned long last_switch_timestamp;
unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
@@ -1290,9 +1316,15 @@ struct task_struct {
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
+ * mempolicy */
spinlock_t alloc_lock;
+#ifdef CONFIG_GENERIC_HARDIRQS
+ /* IRQ handler threads */
+ struct irqaction *irqaction;
+#endif
+
/* Protection of the PI data structures: */
spinlock_t pi_lock;
@@ -1328,6 +1360,7 @@ struct task_struct {
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
+ gfp_t lockdep_reclaim_gfp;
#endif
/* journalling filesystem info */
@@ -1352,8 +1385,7 @@ struct task_struct {
cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
- nodemask_t mems_allowed;
- int cpuset_mems_generation;
+ nodemask_t mems_allowed; /* Protected by alloc_lock */
int cpuset_mem_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
@@ -1370,8 +1402,13 @@ struct task_struct {
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
+#ifdef CONFIG_PERF_COUNTERS
+ struct perf_counter_context *perf_counter_ctxp;
+ struct mutex perf_counter_mutex;
+ struct list_head perf_counter_list;
+#endif
#ifdef CONFIG_NUMA
- struct mempolicy *mempolicy;
+ struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
#endif
atomic_t fs_excl; /* holding fs exclusive resources */
@@ -1405,6 +1442,8 @@ struct task_struct {
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
+ /* time stamp for last schedule */
+ unsigned long long ftrace_timestamp;
/*
* Number of functions that haven't been traced
* because of depth overrun.
@@ -1416,7 +1455,9 @@ struct task_struct {
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
-#endif
+ /* bitmask of trace recursion */
+ unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1453,16 +1494,6 @@ static inline int rt_task(struct task_struct *p)
return rt_prio(p->prio);
}
-static inline void set_task_session(struct task_struct *tsk, pid_t session)
-{
- tsk->signal->__session = session;
-}
-
-static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
-{
- tsk->signal->__pgrp = pgrp;
-}
-
static inline struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
@@ -1473,6 +1504,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
return task->group_leader->pids[PIDTYPE_PID].pid;
}
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current;
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
static inline struct pid *task_pgrp(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PGID].pid;
@@ -1498,17 +1534,23 @@ struct pid_namespace;
*
* see also pid_nr() etc in include/linux/pid.h
*/
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ struct pid_namespace *ns);
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
- return pid_vnr(task_pid(tsk));
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
@@ -1525,31 +1567,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
}
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
{
- return tsk->signal->__pgrp;
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
- return pid_vnr(task_pgrp(tsk));
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
{
- return tsk->signal->__session;
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
- return pid_vnr(task_session(tsk));
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
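The comment above the task_pgrp()/task_session() accessors warns that their results are only stable under tasklist_lock or RCU; a hedged sketch of a safe caller, roughly equivalent to task_pgrp_vnr():

	#include <linux/pid.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	static pid_t example_read_pgrp(struct task_struct *tsk)
	{
		pid_t pgrp;

		rcu_read_lock();
		pgrp = pid_vnr(task_pgrp(tsk));	/* struct pid stays valid while under RCU */
		rcu_read_unlock();

		return pgrp;
	}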
/**
* pid_alive - check that a task structure is not stale
@@ -1673,6 +1718,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
return set_cpus_allowed_ptr(p, &new_mask);
}
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
extern unsigned long long sched_clock(void);
extern void sched_clock_init(void);
@@ -1740,11 +1795,23 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_timer_migration;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
loff_t *ppos);
#endif
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+ return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+ return 1;
+}
+#endif
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
@@ -1811,9 +1878,6 @@ extern struct pid_namespace init_pid_ns;
/*
* find a task by one of its numerical ids
*
- * find_task_by_pid_type_ns():
- * it is the most generic call - it finds a task by all id,
- * type and namespace specified
* find_task_by_pid_ns():
* finds a task by its pid in the specified namespace
* find_task_by_vpid():
@@ -1822,9 +1886,6 @@ extern struct pid_namespace init_pid_ns;
* see also find_vpid() etc in include/linux/pid.h
*/
-extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
- struct pid_namespace *ns);
-
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
@@ -1859,6 +1920,7 @@ extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
+extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -1949,7 +2011,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
/* Allocate a new mm structure and copy contents from tsk->mm */
extern struct mm_struct *dup_mm(struct task_struct *tsk);
-extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+ struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);
@@ -1974,8 +2037,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
+static inline void wait_task_context_switch(struct task_struct *p) {}
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
@@ -1983,7 +2048,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
}
#endif
-#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+ list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
@@ -2022,8 +2088,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
static inline struct task_struct *next_thread(const struct task_struct *p)
{
- return list_entry(rcu_dereference(p->thread_group.next),
- struct task_struct, thread_group);
+ return list_entry_rcu(p->thread_group.next,
+ struct task_struct, thread_group);
}
static inline int thread_group_empty(struct task_struct *p)
@@ -2034,6 +2100,11 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
+static inline int task_detached(struct task_struct *p)
+{
+ return p->exit_signal == -1;
+}
+
/*
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
@@ -2090,6 +2161,19 @@ static inline int object_is_on_stack(void *obj)
extern void thread_info_cache_init(void);
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do { /* Skip over canary */
+ n++;
+ } while (!*n);
+
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
@@ -2133,6 +2217,12 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+static inline int restart_syscall(void)
+{
+ set_tsk_thread_flag(current, TIF_SIGPENDING);
+ return -ERESTARTNOINTR;
+}
+
static inline int signal_pending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
@@ -2343,6 +2433,13 @@ static inline void inc_syscw(struct task_struct *tsk)
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+/*
+ * Call the function if the target task is executing on a CPU right now:
+ */
+extern void task_oncpu_function_call(struct task_struct *p,
+ void (*func) (void *info), void *info);
+
+
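A small sketch of the new cross-call helper declared above; the callback and its counter argument are hypothetical:

	#include <linux/sched.h>

	static void example_remote_probe(void *info)
	{
		/* runs on the CPU where the target task is currently executing */
		atomic_inc((atomic_t *)info);
	}

	static void example_poke_task(struct task_struct *p, atomic_t *hits)
	{
		/* does nothing if the task is not on a CPU right now */
		task_oncpu_function_call(p, example_remote_probe, hits);
	}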
#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 8ba1c320f97..b464b9d3d24 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -60,7 +60,7 @@ typedef struct sctphdr {
__be16 source;
__be16 dest;
__be32 vtag;
- __be32 checksum;
+ __le32 checksum;
} __attribute__((packed)) sctp_sctphdr_t;
#ifdef __KERNEL__
@@ -172,35 +172,35 @@ typedef struct sctp_paramhdr {
typedef enum {
/* RFC 2960 Section 3.3.5 */
- SCTP_PARAM_HEARTBEAT_INFO = __constant_htons(1),
+ SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1),
/* RFC 2960 Section 3.3.2.1 */
- SCTP_PARAM_IPV4_ADDRESS = __constant_htons(5),
- SCTP_PARAM_IPV6_ADDRESS = __constant_htons(6),
- SCTP_PARAM_STATE_COOKIE = __constant_htons(7),
- SCTP_PARAM_UNRECOGNIZED_PARAMETERS = __constant_htons(8),
- SCTP_PARAM_COOKIE_PRESERVATIVE = __constant_htons(9),
- SCTP_PARAM_HOST_NAME_ADDRESS = __constant_htons(11),
- SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12),
- SCTP_PARAM_ECN_CAPABLE = __constant_htons(0x8000),
+ SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5),
+ SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6),
+ SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7),
+ SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8),
+ SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9),
+ SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11),
+ SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12),
+ SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000),
/* AUTH Extension Section 3 */
- SCTP_PARAM_RANDOM = __constant_htons(0x8002),
- SCTP_PARAM_CHUNKS = __constant_htons(0x8003),
- SCTP_PARAM_HMAC_ALGO = __constant_htons(0x8004),
+ SCTP_PARAM_RANDOM = cpu_to_be16(0x8002),
+ SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003),
+ SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004),
/* Add-IP: Supported Extensions, Section 4.2 */
- SCTP_PARAM_SUPPORTED_EXT = __constant_htons(0x8008),
+ SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008),
/* PR-SCTP Sec 3.1 */
- SCTP_PARAM_FWD_TSN_SUPPORT = __constant_htons(0xc000),
+ SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000),
/* Add-IP Extension. Section 3.2 */
- SCTP_PARAM_ADD_IP = __constant_htons(0xc001),
- SCTP_PARAM_DEL_IP = __constant_htons(0xc002),
- SCTP_PARAM_ERR_CAUSE = __constant_htons(0xc003),
- SCTP_PARAM_SET_PRIMARY = __constant_htons(0xc004),
- SCTP_PARAM_SUCCESS_REPORT = __constant_htons(0xc005),
- SCTP_PARAM_ADAPTATION_LAYER_IND = __constant_htons(0xc006),
+ SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001),
+ SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002),
+ SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003),
+ SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004),
+ SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
+ SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
} sctp_param_t; /* enum */
@@ -212,13 +212,13 @@ typedef enum {
*
*/
typedef enum {
- SCTP_PARAM_ACTION_DISCARD = __constant_htons(0x0000),
- SCTP_PARAM_ACTION_DISCARD_ERR = __constant_htons(0x4000),
- SCTP_PARAM_ACTION_SKIP = __constant_htons(0x8000),
- SCTP_PARAM_ACTION_SKIP_ERR = __constant_htons(0xc000),
+ SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000),
+ SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000),
+ SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000),
+ SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000),
} sctp_param_action_t;
-enum { SCTP_PARAM_ACTION_MASK = __constant_htons(0xc000), };
+enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), };
/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */
@@ -457,17 +457,17 @@ typedef struct sctp_operr_chunk {
*/
typedef enum {
- SCTP_ERROR_NO_ERROR = __constant_htons(0x00),
- SCTP_ERROR_INV_STRM = __constant_htons(0x01),
- SCTP_ERROR_MISS_PARAM = __constant_htons(0x02),
- SCTP_ERROR_STALE_COOKIE = __constant_htons(0x03),
- SCTP_ERROR_NO_RESOURCE = __constant_htons(0x04),
- SCTP_ERROR_DNS_FAILED = __constant_htons(0x05),
- SCTP_ERROR_UNKNOWN_CHUNK = __constant_htons(0x06),
- SCTP_ERROR_INV_PARAM = __constant_htons(0x07),
- SCTP_ERROR_UNKNOWN_PARAM = __constant_htons(0x08),
- SCTP_ERROR_NO_DATA = __constant_htons(0x09),
- SCTP_ERROR_COOKIE_IN_SHUTDOWN = __constant_htons(0x0a),
+ SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00),
+ SCTP_ERROR_INV_STRM = cpu_to_be16(0x01),
+ SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02),
+ SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03),
+ SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04),
+ SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05),
+ SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06),
+ SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07),
+ SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08),
+ SCTP_ERROR_NO_DATA = cpu_to_be16(0x09),
+ SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a),
/* SCTP Implementation Guide:
@@ -476,9 +476,9 @@ typedef enum {
* 13 Protocol Violation
*/
- SCTP_ERROR_RESTART = __constant_htons(0x0b),
- SCTP_ERROR_USER_ABORT = __constant_htons(0x0c),
- SCTP_ERROR_PROTO_VIOLATION = __constant_htons(0x0d),
+ SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
+ SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
+ SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
/* ADDIP Section 3.3 New Error Causes
*
@@ -487,17 +487,17 @@ typedef enum {
*
* Value Cause Code
* --------- ----------------
- * 0x0100 Request to Delete Last Remaining IP Address.
- * 0x0101 Operation Refused Due to Resource Shortage.
- * 0x0102 Request to Delete Source IP Address.
- * 0x0103 Association Aborted due to illegal ASCONF-ACK
- * 0x0104 Request refused - no authorization.
+ * 0x00A0 Request to Delete Last Remaining IP Address.
+ * 0x00A1 Operation Refused Due to Resource Shortage.
+ * 0x00A2 Request to Delete Source IP Address.
+ * 0x00A3 Association Aborted due to illegal ASCONF-ACK
+ * 0x00A4 Request refused - no authorization.
*/
- SCTP_ERROR_DEL_LAST_IP = __constant_htons(0x0100),
- SCTP_ERROR_RSRC_LOW = __constant_htons(0x0101),
- SCTP_ERROR_DEL_SRC_IP = __constant_htons(0x0102),
- SCTP_ERROR_ASCONF_ACK = __constant_htons(0x0103),
- SCTP_ERROR_REQ_REFUSED = __constant_htons(0x0104),
+ SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x00A0),
+ SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x00A1),
+ SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x00A2),
+ SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x00A3),
+ SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x00A4),
/* AUTH Section 4. New Error Cause
*
@@ -509,7 +509,7 @@ typedef enum {
* --------------------------------------------------------------
* 0x0105 Unsupported HMAC Identifier
*/
- SCTP_ERROR_UNSUP_HMAC = __constant_htons(0x0105)
+ SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105)
} sctp_error_t;
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 262a8dccfa8..167c33361d9 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -21,6 +21,8 @@ extern long prctl_set_seccomp(unsigned long);
#else /* CONFIG_SECCOMP */
+#include <linux/errno.h>
+
typedef struct { } seccomp_t;
#define secure_computing(x) do { } while (0)
diff --git a/include/linux/security.h b/include/linux/security.h
index 1f2ab6353c0..5eff459b383 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/sched.h>
#include <linux/key.h>
#include <linux/xfrm.h>
+#include <linux/gfp.h>
#include <net/flow.h>
/* Maximum number of letters for an LSM name string */
@@ -880,11 +881,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @sock contains the listening socket structure.
* @newsock contains the newly created server socket for connection.
* Return 0 if permission is granted.
- * @socket_post_accept:
- * This hook allows a security module to copy security
- * information into the newly created socket's inode.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
* @socket_sendmsg:
* Check permission before transmitting a message to another socket.
* @sock contains the socket structure.
@@ -1554,8 +1550,6 @@ struct security_operations {
struct sockaddr *address, int addrlen);
int (*socket_listen) (struct socket *sock, int backlog);
int (*socket_accept) (struct socket *sock, struct socket *newsock);
- void (*socket_post_accept) (struct socket *sock,
- struct socket *newsock);
int (*socket_sendmsg) (struct socket *sock,
struct msghdr *msg, int size);
int (*socket_recvmsg) (struct socket *sock,
@@ -2203,6 +2197,8 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot,
unsigned long addr,
unsigned long addr_only)
{
+ if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO))
+ return -EACCES;
return 0;
}
@@ -2537,7 +2533,6 @@ int security_socket_bind(struct socket *sock, struct sockaddr *address, int addr
int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen);
int security_socket_listen(struct socket *sock, int backlog);
int security_socket_accept(struct socket *sock, struct socket *newsock);
-void security_socket_post_accept(struct socket *sock, struct socket *newsock);
int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size);
int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
int size, int flags);
@@ -2616,11 +2611,6 @@ static inline int security_socket_accept(struct socket *sock,
return 0;
}
-static inline void security_socket_post_accept(struct socket *sock,
- struct socket *newsock)
-{
-}
-
static inline int security_socket_sendmsg(struct socket *sock,
struct msghdr *msg, int size)
{
@@ -2966,5 +2956,28 @@ static inline void securityfs_remove(struct dentry *dentry)
#endif
+#ifdef CONFIG_SECURITY
+
+static inline char *alloc_secdata(void)
+{
+ return (char *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline void free_secdata(void *secdata)
+{
+ free_page((unsigned long)secdata);
+}
+
+#else
+
+static inline char *alloc_secdata(void)
+{
+ return (char *)1;
+}
+
+static inline void free_secdata(void *secdata)
+{ }
+#endif /* CONFIG_SECURITY */
+
#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/selinux_netlink.h b/include/linux/selinux_netlink.h
index bbf489decd8..d239797785c 100644
--- a/include/linux/selinux_netlink.h
+++ b/include/linux/selinux_netlink.h
@@ -12,6 +12,8 @@
#ifndef _LINUX_SELINUX_NETLINK_H
#define _LINUX_SELINUX_NETLINK_H
+#include <linux/types.h>
+
/* Message types. */
#define SELNL_MSG_BASE 0x10
enum {
@@ -38,11 +40,11 @@ enum selinux_nlgroups {
/* Message structures */
struct selnl_msg_setenforce {
- int32_t val;
+ __s32 val;
};
struct selnl_msg_policyload {
- u_int32_t seqno;
+ __u32 seqno;
};
#endif /* _LINUX_SELINUX_NETLINK_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index f616f31576d..0c6a86b7959 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -43,6 +43,7 @@ int seq_release(struct inode *, struct file *);
int seq_escape(struct seq_file *, const char *, const char *);
int seq_putc(struct seq_file *m, char c);
int seq_puts(struct seq_file *m, const char *s);
+int seq_write(struct seq_file *seq, const void *data, size_t len);
int seq_printf(struct seq_file *, const char *, ...)
__attribute__ ((format (printf,2,3)));
@@ -55,7 +56,7 @@ int seq_bitmap(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits);
static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
{
- return seq_bitmap(m, mask->bits, nr_cpu_ids);
+ return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
}
static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
@@ -63,12 +64,13 @@ static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
return seq_bitmap(m, mask->bits, MAX_NUMNODES);
}
-int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
+int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits);
-static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask)
+static inline int seq_cpumask_list(struct seq_file *m,
+ const struct cpumask *mask)
{
- return seq_bitmap_list(m, mask->bits, NR_CPUS);
+ return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
}
static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 9136cc5608c..e5bb75a6380 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -96,54 +96,76 @@ struct serial_uart_config {
/*
* Definitions for async_struct (and serial_struct) flags field
+ *
+ * Define ASYNCB_* for convenient use with {test,set,clear}_bit.
*/
-#define ASYNC_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes
- on the callout port */
-#define ASYNC_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */
-#define ASYNC_SAK 0x0004 /* Secure Attention Key (Orange book) */
-#define ASYNC_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */
-
-#define ASYNC_SPD_MASK 0x1030
-#define ASYNC_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */
-
-#define ASYNC_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */
-#define ASYNC_SPD_CUST 0x0030 /* Use user-specified divisor */
-
-#define ASYNC_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */
-#define ASYNC_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */
-#define ASYNC_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */
-#define ASYNC_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */
-#define ASYNC_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */
-
-#define ASYNC_HARDPPS_CD 0x0800 /* Call hardpps when CD goes high */
-
-#define ASYNC_SPD_SHI 0x1000 /* Use 230400 instead of 38400 bps */
-#define ASYNC_SPD_WARP 0x1010 /* Use 460800 instead of 38400 bps */
-
-#define ASYNC_LOW_LATENCY 0x2000 /* Request low latency behaviour */
-
-#define ASYNC_BUGGY_UART 0x4000 /* This is a buggy UART, skip some safety
- * checks. Note: can be dangerous! */
-
-#define ASYNC_AUTOPROBE 0x8000 /* Port was autoprobed by PCI or PNP code */
-
-#define ASYNC_FLAGS 0x7FFF /* Possible legal async flags */
-#define ASYNC_USR_MASK 0x3430 /* Legal flags that non-privileged
- * users can set or reset */
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ASYNC_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ASYNC_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ASYNC_CLOSING 0x08000000 /* Serial port is closing */
-#define ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-#define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards
- --- no longer used */
-#define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */
-
-#define ASYNC_BOOT_ONLYMCA 0x00400000 /* Probe only if MCA bus */
-#define ASYNC_INTERNAL_FLAGS 0xFFC00000 /* Internal flags */
+#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes
+ * on the callout port */
+#define ASYNCB_FOURPORT 1 /* Set OUT1, OUT2 per AST Fourport settings */
+#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
+#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */
+#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */
+#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */
+#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */
+#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during
+ * autoconfiguration */
+#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */
+#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */
+#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */
+#define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */
+#define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */
+#define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */
+#define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety
+ * checks. Note: can be dangerous! */
+#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */
+#define ASYNCB_LAST_USER 15
+
+/* Internal flags used only by kernel */
+#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
+#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
+#define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */
+#define ASYNCB_CLOSING 27 /* Serial port is closing */
+#define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */
+#define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */
+#define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */
+#define ASYNCB_CONS_FLOW 23 /* flow control for console */
+#define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */
+#define ASYNCB_FIRST_KERNEL 22
+
+#define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY)
+#define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT)
+#define ASYNC_SAK (1U << ASYNCB_SAK)
+#define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS)
+#define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI)
+#define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI)
+#define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST)
+#define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ)
+#define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT)
+#define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT)
+#define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP)
+#define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD)
+#define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI)
+#define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY)
+#define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART)
+#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE)
+
+#define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1)
+#define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \
+ ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY)
+#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
+#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
+#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
+
+#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
+#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
+#define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF)
+#define ASYNC_CLOSING (1U << ASYNCB_CLOSING)
+#define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW)
+#define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD)
+#define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ)
+#define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW)
+#define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA)
+#define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1))
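A small sketch of what the lead comment of this block means in practice: the ASYNCB_* values are bit numbers suitable for the atomic bitops, while the ASYNC_* masks keep working as plain flag masks ("flags" is a hypothetical per-port flag word):

	#include <linux/bitops.h>
	#include <linux/kernel.h>
	#include <linux/serial.h>

	static void example_port_flags(unsigned long *flags)
	{
		/* atomic test-and-set on first open */
		if (!test_and_set_bit(ASYNCB_INITIALIZED, flags))
			pr_debug("port initialized on first open\n");

		/* mask-style tests still work with the ASYNC_* defines */
		if (*flags & ASYNC_LOW_LATENCY)
			pr_debug("low latency requested\n");

		clear_bit(ASYNCB_CLOSING, flags);
	}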
/*
* Multiport serial configuration structure --- external structure
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index df9245c7bd3..23d2fb051f9 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -41,7 +41,8 @@
#define PORT_XSCALE 15
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */
-#define PORT_MAX_8250 17 /* max port ID */
+#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
+#define PORT_MAX_8250 18 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -164,6 +165,15 @@
/* NWPSERIAL */
#define PORT_NWPSERIAL 85
+/* MAX3100 */
+#define PORT_MAX3100 86
+
+/* Timberdale UART */
+#define PORT_TIMBUART 87
+
+/* Qualcomm MSM SoCs */
+#define PORT_MSM 88
+
#ifdef __KERNEL__
#include <linux/compiler.h>
@@ -277,7 +287,7 @@ struct uart_port {
struct uart_icount icount; /* statistics */
struct console *cons; /* struct console, if any */
-#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
unsigned long sysrq; /* sysrq timeout */
#endif
diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h
new file mode 100644
index 00000000000..4976befb6ae
--- /dev/null
+++ b/include/linux/serial_max3100.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright (C) 2007 Christian Pellegrin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+#ifndef _LINUX_SERIAL_MAX3100_H
+#define _LINUX_SERIAL_MAX3100_H 1
+
+
+/**
+ * struct plat_max3100 - MAX3100 SPI UART platform data
+ * @loopback: force MAX3100 in loopback
+ * @crystal: 1 for 3.6864 MHz, 0 for 1.8432 MHz
+ * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook
+ * called on suspend and resume to activate it.
+ * @poll_time: poll time for the CTS signal in ms, 0 disables polling (no
+ * hardware flow control is then possible, but CPU usage is lower)
+ *
+ * You should use this structure in your machine description to specify
+ * how the MAX3100 is connected. Example:
+ *
+ * static struct plat_max3100 max3100_plat_data = {
+ * .loopback = 0,
+ * .crystal = 0,
+ * .poll_time = 100,
+ * };
+ *
+ * static struct spi_board_info spi_board_info[] = {
+ * {
+ * .modalias = "max3100",
+ * .platform_data = &max3100_plat_data,
+ * .irq = IRQ_EINT12,
+ * .max_speed_hz = 5*1000*1000,
+ * .chip_select = 0,
+ * },
+ * };
+ *
+ **/
+struct plat_max3100 {
+ int loopback;
+ int crystal;
+ void (*max3100_hw_suspend) (int suspend);
+ int poll_time;
+};
+
+#endif
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index 96c0d93fc2c..850db2e8051 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -323,6 +323,7 @@
#define UART_OMAP_MVER 0x14 /* Module version register */
#define UART_OMAP_SYSC 0x15 /* System configuration register */
#define UART_OMAP_SYSS 0x16 /* System status register */
+#define UART_OMAP_WER 0x17 /* Wake-up enable register */
#endif /* _LINUX_SERIAL_REG_H */
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 893cc53486b..1c297ddc9d5 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -25,8 +25,7 @@ struct plat_sci_port {
unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */
unsigned int type; /* SCI / SCIF / IRDA */
upf_t flags; /* UPF_* flags */
+ char *clk; /* clock string */
};
-int early_sci_setup(struct uart_port *port);
-
#endif /* __LINUX_SERIAL_SCI_H */
diff --git a/include/linux/serio.h b/include/linux/serio.h
index e0417e4d3f1..126d24c9eaa 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -15,6 +15,7 @@
#ifdef __KERNEL__
+#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -28,7 +29,10 @@ struct serio {
char name[32];
char phys[32];
- unsigned int manual_bind;
+ bool manual_bind;
+ bool registered; /* port has been fully registered with driver core */
+ bool suspended; /* port is suspended */
+
struct serio_device_id id;
@@ -47,7 +51,6 @@ struct serio {
struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
struct device dev;
- unsigned int registered; /* port has been fully registered with driver core */
struct list_head node;
};
@@ -58,7 +61,7 @@ struct serio_driver {
char *description;
struct serio_device_id *id_table;
- unsigned int manual_bind;
+ bool manual_bind;
void (*write_wakeup)(struct serio *);
irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int);
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 68e212ff9dd..eb1423a0078 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -85,6 +85,7 @@ struct intc_desc symbol __initdata = { \
}
#endif
+unsigned int intc_evt2irq(unsigned int vector);
void __init register_intc_controller(struct intc_desc *desc);
int intc_set_priority(unsigned int irq, unsigned int prio);
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
new file mode 100644
index 00000000000..864bd56bd3b
--- /dev/null
+++ b/include/linux/sh_timer.h
@@ -0,0 +1,13 @@
+#ifndef __SH_TIMER_H__
+#define __SH_TIMER_H__
+
+struct sh_timer_config {
+ char *name;
+ long channel_offset;
+ int timer_bit;
+ char *clk;
+ unsigned long clockevent_rating;
+ unsigned long clocksource_rating;
+};
+
+#endif /* __SH_TIMER_H__ */
diff --git a/include/linux/sht15.h b/include/linux/sht15.h
new file mode 100644
index 00000000000..046bce05eca
--- /dev/null
+++ b/include/linux/sht15.h
@@ -0,0 +1,24 @@
+/*
+ * sht15.h - support for the SHT15 Temperature and Humidity Sensor
+ *
+ * Copyright (c) 2009 Jonathan Cameron
+ *
+ * Copyright (c) 2007 Wouter Horre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/**
+ * struct sht15_platform_data - sht15 connectivity info
+ * @gpio_data: number of the GPIO to which the bidirectional data line is connected
+ * @gpio_sck: number of the GPIO to which the data clock is connected
+ * @supply_mv: supply voltage in mV. Overridden by the regulator if available.
+ **/
+struct sht15_platform_data {
+ int gpio_data;
+ int gpio_sck;
+ int supply_mv;
+};
+
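A minimal board-file sketch for hooking this platform data up; the GPIO numbers are invented and the "sht15" platform device name is assumed from the matching hwmon driver:

	#include <linux/platform_device.h>
	#include <linux/sht15.h>

	static struct sht15_platform_data example_sht15_pdata = {
		.gpio_data	= 100,	/* hypothetical GPIO numbers */
		.gpio_sck	= 101,
		.supply_mv	= 3300,	/* 3.3 V supply */
	};

	static struct platform_device example_sht15_device = {
		.name	= "sht15",
		.id	= -1,
		.dev	= {
			.platform_data = &example_sht15_pdata,
		},
	};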
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 84f997f8aa5..c7552836bd9 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig)
extern int next_signal(struct sigpending *pending, sigset_t *mask);
extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+ siginfo_t *info);
extern long do_sigpending(void __user *, unsigned long);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern int show_unhandled_signals;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9dcf956ad18..b47b3f039d1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,7 @@
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>
@@ -29,9 +30,6 @@
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
-#define HAVE_ALLOC_SKB /* For the drivers to know */
-#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
-
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
@@ -135,6 +133,56 @@ struct skb_frag_struct {
__u32 size;
};
+#define HAVE_HW_TIME_STAMP
+
+/**
+ * struct skb_shared_hwtstamps - hardware time stamps
+ * @hwtstamp: hardware time stamp transformed into duration
+ * since arbitrary point in time
+ * @syststamp: hwtstamp transformed to system time base
+ *
+ * Software time stamps generated by ktime_get_real() are stored in
+ * skb->tstamp. The relation between the different kinds of time
+ * stamps is as follows:
+ *
+ * syststamp and tstamp can be compared against each other in
+ * arbitrary combinations. The accuracy of a
+ * syststamp/tstamp/"syststamp from other device" comparison is
+ * limited by the accuracy of the transformation into system time
+ * base. This depends on the device driver and its underlying
+ * hardware.
+ *
+ * hwtstamps can only be compared against other hwtstamps from
+ * the same device.
+ *
+ * This structure is attached to packets as part of the
+ * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
+ */
+struct skb_shared_hwtstamps {
+ ktime_t hwtstamp;
+ ktime_t syststamp;
+};
+
+/**
+ * struct skb_shared_tx - instructions for time stamping of outgoing packets
+ * @hardware: generate hardware time stamp
+ * @software: generate software time stamp
+ * @in_progress: device driver is going to provide
+ * hardware time stamp
+ * @flags: all shared_tx flags
+ *
+ * These flags are attached to packets as part of the
+ * &skb_shared_info. Use skb_tx() to get a pointer.
+ */
+union skb_shared_tx {
+ struct {
+ __u8 hardware:1,
+ software:1,
+ in_progress:1;
+ };
+ __u8 flags;
+};
+
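A hedged driver-side sketch of how the tx flags above are meant to be consulted before a frame is handed to hardware (skb_tx() is added further down in this patch; programming the NIC itself is elided):

	#include <linux/skbuff.h>

	static void example_request_tx_timestamp(struct sk_buff *skb)
	{
		union skb_shared_tx *shtx = skb_tx(skb);

		if (shtx->hardware) {
			/* tell the stack a hardware stamp will be delivered later */
			shtx->in_progress = 1;
			/* ... arm the NIC to time stamp this frame ... */
		}
	}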
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -142,18 +190,23 @@ struct skb_shared_info {
atomic_t dataref;
unsigned short nr_frags;
unsigned short gso_size;
+#ifdef CONFIG_HAS_DMA
+ dma_addr_t dma_head;
+#endif
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
unsigned short gso_type;
__be32 ip6_frag_id;
-#ifdef CONFIG_HAS_DMA
- unsigned int num_dma_maps;
-#endif
+ union skb_shared_tx tx_flags;
struct sk_buff *frag_list;
+ struct skb_shared_hwtstamps hwtstamps;
skb_frag_t frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
- dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma_maps[MAX_SKB_FRAGS];
#endif
+ /* Intermediate layers must ensure that destructor_arg
+ * remains valid until the skb destructor is called */
+ void * destructor_arg;
};
/* We divide dataref into two halves. The higher 16 bits hold references
@@ -188,6 +241,8 @@ enum {
SKB_GSO_TCP_ECN = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
+
+ SKB_GSO_FCOE = 1 << 5,
};
#if BITS_PER_LONG > 32
@@ -210,7 +265,7 @@ typedef unsigned char *sk_buff_data_t;
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
- * @dst: destination entry
+ * @_skb_dst: destination entry
* @sp: the security path, used for xfrm
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @len: Length of actual data
@@ -250,9 +305,6 @@ typedef unsigned char *sk_buff_data_t;
* @tc_verd: traffic control verdict
* @ndisc_nodetype: router type (from link layer)
* @do_not_encrypt: set to prevent encryption of this frame
- * @requeue: set to indicate that the wireless core should attempt
- * a software retry on this frame if we failed to
- * receive an ACK for it
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
@@ -268,10 +320,7 @@ struct sk_buff {
ktime_t tstamp;
struct net_device *dev;
- union {
- struct dst_entry *dst;
- struct rtable *rtable;
- };
+ unsigned long _skb_dst;
#ifdef CONFIG_XFRM
struct sec_path *sp;
#endif
@@ -295,6 +344,7 @@ struct sk_buff {
};
};
__u32 priority;
+ kmemcheck_bitfield_begin(flags1);
__u8 local_df:1,
cloned:1,
ip_summed:2,
@@ -305,6 +355,7 @@ struct sk_buff {
ipvs_property:1,
peeked:1,
nf_trace:1;
+ kmemcheck_bitfield_end(flags1);
__be16 protocol;
void (*destructor)(struct sk_buff *skb);
@@ -324,13 +375,16 @@ struct sk_buff {
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
+
+ kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
__u8 do_not_encrypt:1;
- __u8 requeue:1;
#endif
+ kmemcheck_bitfield_end(flags2);
+
/* 0/13/14 bit hole */
#ifdef CONFIG_NET_DMA
@@ -372,7 +426,23 @@ extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
enum dma_data_direction dir);
#endif
+static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
+{
+ return (struct dst_entry *)skb->_skb_dst;
+}
+
+static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
+{
+ skb->_skb_dst = (unsigned long)dst;
+}
+
+static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+{
+ return (struct rtable *)skb_dst(skb);
+}
+
extern void kfree_skb(struct sk_buff *skb);
+extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
gfp_t priority, int fclone, int node);
@@ -411,7 +481,8 @@ extern int skb_to_sgvec(struct sk_buff *skb,
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
-#define dev_kfree_skb(a) kfree_skb(a)
+#define dev_kfree_skb(a) consume_skb(a)
+#define dev_consume_skb(a) kfree_skb_clean(a)
extern void skb_over_panic(struct sk_buff *skb, int len,
void *here);
extern void skb_under_panic(struct sk_buff *skb, int len,
@@ -459,6 +530,16 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
+static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
+{
+ return &skb_shinfo(skb)->hwtstamps;
+}
+
+static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
+{
+ return &skb_shinfo(skb)->tx_flags;
+}
+
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
@@ -999,7 +1080,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
int off, int size);
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
-#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
+#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb))
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1278,7 +1359,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
* The networking layer reserves some headroom in skb data (via
* dev_alloc_skb). This is used to avoid having to reallocate skb data when
* the header has to grow. In the default case, if the header has to grow
- * 16 bytes or less we avoid the reallocation.
+ * 32 bytes or less we avoid the reallocation.
*
* Unfortunately this headroom changes the DMA alignment of the resulting
* network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
@@ -1286,11 +1367,11 @@ static inline int skb_network_offset(const struct sk_buff *skb)
* perhaps setting it to a cacheline in size (since that will maintain
* cacheline alignment of the DMA). It must be a power of 2.
*
- * Various parts of the networking layer expect at least 16 bytes of
+ * Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
*/
#ifndef NET_SKB_PAD
-#define NET_SKB_PAD 16
+#define NET_SKB_PAD 32
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
@@ -1638,6 +1719,25 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
skb = skb->prev)
+static inline bool skb_has_frags(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->frag_list != NULL;
+}
+
+static inline void skb_frag_list_init(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->frag_list = NULL;
+}
+
+static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
+{
+ frag->next = skb_shinfo(skb)->frag_list;
+ skb_shinfo(skb)->frag_list = frag;
+}
+
+#define skb_walk_frags(skb, iter) \
+ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
+
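A small sketch using the new frag_list helpers: walk an skb's frag_list and add up the fragment lengths; illustrative only:

	#include <linux/skbuff.h>

	static unsigned int example_frag_list_len(const struct sk_buff *skb)
	{
		const struct sk_buff *iter;
		unsigned int len = 0;

		if (!skb_has_frags(skb))
			return 0;

		skb_walk_frags(skb, iter)
			len += iter->len;

		return len;
	}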
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
@@ -1652,8 +1752,14 @@ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
int offset,
- struct iovec *from,
+ const struct iovec *from,
+ int from_offset,
int len);
+extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
+ int offset,
+ const struct iovec *to,
+ int to_offset,
+ int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
unsigned int flags);
@@ -1678,8 +1784,6 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
-extern int skb_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -1726,6 +1830,11 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
extern void skb_init(void);
+static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
+{
+ return skb->tstamp;
+}
+
/**
* skb_get_timestamp - get timestamp from a skb
* @skb: skb to get stamp from
@@ -1735,11 +1844,18 @@ extern void skb_init(void);
* This function converts the offset back to a struct timeval and stores
* it in stamp.
*/
-static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
+static inline void skb_get_timestamp(const struct sk_buff *skb,
+ struct timeval *stamp)
{
*stamp = ktime_to_timeval(skb->tstamp);
}
+static inline void skb_get_timestampns(const struct sk_buff *skb,
+ struct timespec *stamp)
+{
+ *stamp = ktime_to_timespec(skb->tstamp);
+}
+
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
@@ -1755,6 +1871,20 @@ static inline ktime_t net_invalid_timestamp(void)
return ktime_set(0, 0);
}
+/**
+ * skb_tstamp_tx - queue clone of skb with send time stamps
+ * @orig_skb: the original outgoing packet
+ * @hwtstamps: hardware time stamps, may be NULL if not available
+ *
+ * If the skb has a socket associated, then this function clones the
+ * skb (thus sharing the actual data and optional structures), stores
+ * the optional hardware time stamping information (if non NULL) or
+ * generates a software time stamp (otherwise), then queues the clone
+ * to the error queue of the socket. Errors are silently ignored.
+ */
+extern void skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct skb_shared_hwtstamps *hwtstamps);
+
extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
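A sketch of how a driver's transmit-completion path might combine skb_tx(), skb_shared_hwtstamps and skb_tstamp_tx(); my_read_tx_stamp() is a hypothetical helper standing in for however the NIC exposes its transmit clock:

static void my_tx_complete(struct sk_buff *skb)
{
	union skb_shared_tx *tx = skb_tx(skb);

	if (tx->hardware) {
		struct skb_shared_hwtstamps hwts = { };

		hwts.hwtstamp = my_read_tx_stamp();	/* ktime_t from the NIC */
		skb_tstamp_tx(skb, &hwts);	/* clone queued to sk's error queue */
	} else if (tx->software) {
		skb_tstamp_tx(skb, NULL);	/* let the stack take a software stamp */
	}
	dev_kfree_skb(skb);
}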
@@ -1885,7 +2015,7 @@ static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
skb->queue_mapping = queue_mapping;
}
-static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
+static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
return skb->queue_mapping;
}
@@ -1895,6 +2025,24 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu
to->queue_mapping = from->queue_mapping;
}
+static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
+{
+ skb->queue_mapping = rx_queue + 1;
+}
+
+static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
+{
+ return skb->queue_mapping - 1;
+}
+
+static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
+{
+ return (skb->queue_mapping != 0);
+}
+
+extern u16 skb_tx_hash(const struct net_device *dev,
+ const struct sk_buff *skb);
+
#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
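Roughly how a multiqueue driver and the stack are expected to cooperate through the new queue-mapping helpers; struct my_rx_ring and both functions are hypothetical, and the select-queue hook only mirrors what the record/recorded pair suggests the default skb_tx_hash() does:

/* RX path: remember which hardware queue the packet arrived on */
static void my_receive(struct my_rx_ring *ring, struct sk_buff *skb)
{
	skb_record_rx_queue(skb, ring->index);
	netif_receive_skb(skb);
}

/* .ndo_select_queue-style hook: reuse the recorded queue when present,
 * fall back to hashing the flow otherwise */
static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}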
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 24c5602bee9..2da8372519f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -62,6 +62,15 @@
# define SLAB_DEBUG_OBJECTS 0x00000000UL
#endif
+#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK 0x01000000UL
+#else
+# define SLAB_NOTRACK 0x00000000UL
+#endif
+
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
@@ -317,4 +326,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+void __init kmem_cache_init_late(void);
+
#endif /* _LINUX_SLAB_H */
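An illustrative use of the new cache flags; the cache name and struct my_obj are made up, and both flags collapse to 0 when the corresponding debug option is disabled:

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	/* objects here are skipped by kmemleak and not tracked by kmemcheck */
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj), 0,
				     SLAB_NOLEAKTRACE | SLAB_NOTRACK, NULL);
	return my_cache ? 0 : -ENOMEM;
}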
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d7..850d057500d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,88 @@
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
+#include <linux/kmemtrace.h>
+
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+ struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+ unsigned int batchcount;
+ unsigned int limit;
+ unsigned int shared;
+
+ unsigned int buffer_size;
+ u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+ /* order of pgs per slab (2^n) */
+ unsigned int gfporder;
+
+ /* force GFP flags, e.g. GFP_DMA */
+ gfp_t gfpflags;
+
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ struct kmem_cache *slabp_cache;
+ unsigned int slab_size;
+ unsigned int dflags; /* dynamic flags */
+
+ /* constructor func */
+ void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+ const char *name;
+ struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+ unsigned long max_freeable;
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. buffer_size contains the total
+ * object size including these internal fields, the following two
+ * variables contain the offset to the user object and its size.
+ */
+ int obj_offset;
+ int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+ /*
+ * We put nodelists[] at the end of kmem_cache, because we want to size
+ * this array to nr_node_ids slots instead of MAX_NUMNODES
+ * (see kmem_cache_init())
+ * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+ * is statically defined, so we reserve the max number of nodes.
+ */
+ struct kmem_list3 *nodelists[MAX_NUMNODES];
+ /*
+ * Do not add fields after nodelists[]
+ */
+};
/* Size description struct for general caches. */
struct cache_sizes {
@@ -28,8 +110,26 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+ return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+ return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
+ struct kmem_cache *cachep;
+ void *ret;
+
if (__builtin_constant_p(size)) {
int i = 0;
@@ -47,10 +147,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
found:
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
- return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
- flags);
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
#endif
- return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_notrace(cachep, flags);
+
+ trace_kmalloc(_THIS_IP_, ret,
+ size, slab_buffer_size(cachep), flags);
+
+ return ret;
}
return __kmalloc(size, flags);
}
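For constant sizes the general cache is therefore chosen at compile time, and the tracepoint reports both the requested and the backing size. A small illustration, assuming a typical non-debug malloc_sizes[] layout:

static void my_trace_demo(void)
{
	/* 100 bytes falls into the 128-byte general cache, so trace_kmalloc()
	 * records req_size=100 and alloc_size=slab_buffer_size(cachep)=128 */
	void *p = kmalloc(100, GFP_KERNEL);

	kfree(p);
}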
@@ -59,8 +166,25 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+ gfp_t flags,
+ int nodeid)
{
+ return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ struct kmem_cache *cachep;
+ void *ret;
+
if (__builtin_constant_p(size)) {
int i = 0;
@@ -78,11 +202,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
found:
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
- return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
- flags, node);
+ cachep = malloc_sizes[i].cs_dmacachep;
+ else
#endif
- return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
- flags, node);
+ cachep = malloc_sizes[i].cs_cachep;
+
+ ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+ trace_kmalloc_node(_THIS_IP_, ret,
+ size, slab_buffer_size(cachep),
+ flags, node);
+
+ return ret;
}
return __kmalloc_node(size, flags, node);
}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab..bb5368df4be 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+ gfp_t flags)
{
return kmem_cache_alloc_node(cachep, flags, -1);
}
void *__kmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc_node(size, flags, node);
}
@@ -23,14 +24,19 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* kmalloc is the normal method of allocating memory
* in the kernel.
*/
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __kmalloc_node(size, flags, -1);
}
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags);
}
+static inline void kmem_cache_init_late(void)
+{
+ /* Nothing to do */
+}
+
#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
new file mode 100644
index 00000000000..b65c8881f07
--- /dev/null
+++ b/include/linux/slow-work.h
@@ -0,0 +1,95 @@
+/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ *
+ * See Documentation/slow-work.txt
+ */
+
+#ifndef _LINUX_SLOW_WORK_H
+#define _LINUX_SLOW_WORK_H
+
+#ifdef CONFIG_SLOW_WORK
+
+#include <linux/sysctl.h>
+
+struct slow_work;
+
+/*
+ * The operations used to support slow work items
+ */
+struct slow_work_ops {
+ /* get a ref on a work item
+ * - return 0 if successful, -ve if not
+ */
+ int (*get_ref)(struct slow_work *work);
+
+ /* discard a ref to a work item */
+ void (*put_ref)(struct slow_work *work);
+
+ /* execute a work item */
+ void (*execute)(struct slow_work *work);
+};
+
+/*
+ * A slow work item
+ * - A reference is held on the parent object by the thread pool when it is
+ * queued
+ */
+struct slow_work {
+ unsigned long flags;
+#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
+#define SLOW_WORK_EXECUTING 1 /* item currently executing */
+#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
+#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
+ const struct slow_work_ops *ops; /* operations table for this item */
+ struct list_head link; /* link in queue */
+};
+
+/**
+ * slow_work_init - Initialise a slow work item
+ * @work: The work item to initialise
+ * @ops: The operations to use to handle the slow work item
+ *
+ * Initialise a slow work item.
+ */
+static inline void slow_work_init(struct slow_work *work,
+ const struct slow_work_ops *ops)
+{
+ work->flags = 0;
+ work->ops = ops;
+ INIT_LIST_HEAD(&work->link);
+}
+
+/**
+ * vslow_work_init - Initialise a very slow work item
+ * @work: The work item to initialise
+ * @ops: The operations to use to handle the slow work item
+ *
+ * Initialise a very slow work item. This item will be restricted such that
+ * only a certain number of the pool threads will be able to execute items of
+ * this type.
+ */
+static inline void vslow_work_init(struct slow_work *work,
+ const struct slow_work_ops *ops)
+{
+ work->flags = 1 << SLOW_WORK_VERY_SLOW;
+ work->ops = ops;
+ INIT_LIST_HEAD(&work->link);
+}
+
+extern int slow_work_enqueue(struct slow_work *work);
+extern int slow_work_register_user(void);
+extern void slow_work_unregister_user(void);
+
+#ifdef CONFIG_SYSCTL
+extern ctl_table slow_work_sysctls[];
+#endif
+
+#endif /* CONFIG_SLOW_WORK */
+#endif /* _LINUX_SLOW_WORK_H */
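A minimal sketch of a slow-work user following the pattern this header describes; struct my_object, its kref handling and the function names are all hypothetical:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/slow-work.h>

struct my_object {
	struct kref		ref;
	struct slow_work	work;
};

static int my_get_ref(struct slow_work *work)
{
	kref_get(&container_of(work, struct my_object, work)->ref);
	return 0;
}

static void my_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_object, ref));
}

static void my_put_ref(struct slow_work *work)
{
	kref_put(&container_of(work, struct my_object, work)->ref, my_release);
}

static void my_execute(struct slow_work *work)
{
	/* runs in process context on one of the shared pool threads */
}

static const struct slow_work_ops my_slow_work_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

/* slow_work_register_user() must have been called once beforehand */
static int my_queue_it(struct my_object *obj)
{
	slow_work_init(&obj->work, &my_slow_work_ops);
	return slow_work_enqueue(&obj->work);
}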
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 2f5c16b1aac..4dcbc2c7149 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
+#include <linux/kmemtrace.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -46,7 +47,6 @@ struct kmem_cache_cpu {
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
- unsigned long min_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
@@ -89,6 +89,7 @@ struct kmem_cache {
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
+ unsigned long min_partial;
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
@@ -121,10 +122,23 @@ struct kmem_cache {
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow, so this value needs to be high enough that
+ * performance-critical objects are still allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+
+/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
/*
* Sorry that the following has to be that ugly but some versions of GCC
@@ -204,15 +218,32 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+ return kmem_cache_alloc(s, gfpflags);
+}
+#endif
+
static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
- return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
+ unsigned int order = get_order(size);
+ void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+
+ trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
+
+ return ret;
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
+ void *ret;
+
if (__builtin_constant_p(size)) {
- if (size > PAGE_SIZE)
+ if (size > SLUB_MAX_SIZE)
return kmalloc_large(size, flags);
if (!(flags & SLUB_DMA)) {
@@ -221,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
if (!s)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc(s, flags);
+ ret = kmem_cache_alloc_notrace(s, flags);
+
+ trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
+
+ return ret;
}
}
return __kmalloc(size, flags);
@@ -231,19 +266,42 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node)
+{
+ return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif
+
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
+ void *ret;
+
if (__builtin_constant_p(size) &&
- size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+ size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
return ZERO_SIZE_PTR;
- return kmem_cache_alloc_node(s, flags, node);
+ ret = kmem_cache_alloc_node_notrace(s, flags, node);
+
+ trace_kmalloc_node(_THIS_IP_, ret,
+ size, s->size, flags, node);
+
+ return ret;
}
return __kmalloc_node(size, flags, node);
}
#endif
+void __init kmem_cache_init_late(void);
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d6..9e3d8af0920 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
/*
* main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
* (defined in asm header):
- */
+ */
/*
* stops all CPUs but the current one:
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
return 0;
}
-void __smp_call_function_single(int cpuid, struct call_single_data *data);
+void __smp_call_function_single(int cpuid, struct call_single_data *data,
+ int wait);
/*
* Generic and arch helpers
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus;
#else /* !SMP */
+static inline void smp_send_stop(void) { }
+
/*
* These macros fold the SMP functionality into a single CPU system
*/
@@ -174,7 +177,12 @@ static inline void init_call_single_data(void)
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
-#define put_cpu_no_resched() preempt_enable_no_resched()
+
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
void smp_setup_processor_id(void);
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
index 1cbf0313add..5241e4fb4ec 100644
--- a/include/linux/smsc911x.h
+++ b/include/linux/smsc911x.h
@@ -43,5 +43,18 @@ struct smsc911x_platform_config {
/* Constants for flags */
#define SMSC911X_USE_16BIT (BIT(0))
#define SMSC911X_USE_32BIT (BIT(1))
+#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2))
+#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3))
+#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4))
+
+/*
+ * SMSC911X_SWAP_FIFO:
+ * Enables software byte swap for fifo data. Should only be used as a
+ * "last resort" in the case of big endian mode on boards with incorrectly
+ * routed data bus to older devices such as LAN9118. Newer devices such as
+ * LAN9221 can handle this in hardware; there are registers to control
+ * this swapping, but the driver doesn't currently use them.
+ */
+#define SMSC911X_SWAP_FIFO (BIT(5))
#endif /* __LINUX_SMSC911X_H__ */
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index aee3f1e1d1c..0f953fe4041 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -18,7 +18,7 @@
enum
{
IPSTATS_MIB_NUM = 0,
- IPSTATS_MIB_INRECEIVES, /* InReceives */
+ IPSTATS_MIB_INPKTS, /* InReceives */
IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */
IPSTATS_MIB_INNOROUTES, /* InNoRoutes */
@@ -28,7 +28,7 @@ enum
IPSTATS_MIB_INDISCARDS, /* InDiscards */
IPSTATS_MIB_INDELIVERS, /* InDelivers */
IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
- IPSTATS_MIB_OUTREQUESTS, /* OutRequests */
+ IPSTATS_MIB_OUTPKTS, /* OutRequests */
IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */
IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */
IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */
@@ -42,6 +42,12 @@ enum
IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */
IPSTATS_MIB_INBCASTPKTS, /* InBcastPkts */
IPSTATS_MIB_OUTBCASTPKTS, /* OutBcastPkts */
+ IPSTATS_MIB_INOCTETS, /* InOctets */
+ IPSTATS_MIB_OUTOCTETS, /* OutOctets */
+ IPSTATS_MIB_INMCASTOCTETS, /* InMcastOctets */
+ IPSTATS_MIB_OUTMCASTOCTETS, /* OutMcastOctets */
+ IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */
+ IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */
__IPSTATS_MIB_MAX
};
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 20fc4bbfca4..3b461dffe24 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -24,10 +24,12 @@ struct __kernel_sockaddr_storage {
#include <linux/types.h> /* pid_t */
#include <linux/compiler.h> /* __user */
-#ifdef CONFIG_PROC_FS
+#ifdef __KERNEL__
+# ifdef CONFIG_PROC_FS
struct seq_file;
extern void socket_seq_show(struct seq_file *seq);
-#endif
+# endif
+#endif /* __KERNEL__ */
typedef unsigned short sa_family_t;
@@ -179,6 +181,7 @@ struct ucred {
#define AF_ASH 18 /* Ash */
#define AF_ECONET 19 /* Acorn Econet */
#define AF_ATMSVC 20 /* ATM SVCs */
+#define AF_RDS 21 /* RDS sockets */
#define AF_SNA 22 /* Linux SNA Project (nutters!) */
#define AF_IRDA 23 /* IRDA sockets */
#define AF_PPPOX 24 /* PPPoX sockets */
@@ -191,7 +194,8 @@ struct ucred {
#define AF_RXRPC 33 /* RxRPC sockets */
#define AF_ISDN 34 /* mISDN sockets */
#define AF_PHONET 35 /* Phonet sockets */
-#define AF_MAX 36 /* For now.. */
+#define AF_IEEE802154 36 /* IEEE802154 sockets */
+#define AF_MAX 37 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -217,6 +221,7 @@ struct ucred {
#define PF_ASH AF_ASH
#define PF_ECONET AF_ECONET
#define PF_ATMSVC AF_ATMSVC
+#define PF_RDS AF_RDS
#define PF_SNA AF_SNA
#define PF_IRDA AF_IRDA
#define PF_PPPOX AF_PPPOX
@@ -229,6 +234,7 @@ struct ucred {
#define PF_RXRPC AF_RXRPC
#define PF_ISDN AF_ISDN
#define PF_PHONET AF_PHONET
+#define PF_IEEE802154 AF_IEEE802154
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -298,14 +304,16 @@ struct ucred {
#define SOL_PPPOL2TP 273
#define SOL_BLUETOOTH 274
#define SOL_PNPIPE 275
+#define SOL_RDS 276
+#define SOL_IUCV 277
/* IPX options */
#define IPX_TYPE 1
#ifdef __KERNEL__
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
-extern int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov,
- int offset, int len);
+extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+ int offset, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
struct iovec *iov,
int offset,
@@ -313,6 +321,8 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
+extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+ int offset, int len);
extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen);
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
diff --git a/include/linux/sockios.h b/include/linux/sockios.h
index abef7596655..241f179347d 100644
--- a/include/linux/sockios.h
+++ b/include/linux/sockios.h
@@ -122,6 +122,9 @@
#define SIOCBRADDIF 0x89a2 /* add interface to bridge */
#define SIOCBRDELIF 0x89a3 /* remove interface from bridge */
+/* hardware time stamping: parameters in linux/net_tstamp.h */
+#define SIOCSHWTSTAMP 0x89b0
+
/* Device private ioctl calls */
/*
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index f41ffd7c2dd..34c4475ac4a 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -103,6 +103,14 @@
#define SONYPI_EVENT_WIRELESS_OFF 61
#define SONYPI_EVENT_ZOOM_IN_PRESSED 62
#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63
+#define SONYPI_EVENT_CD_EJECT_PRESSED 64
+#define SONYPI_EVENT_MODEKEY_PRESSED 65
+#define SONYPI_EVENT_PKEY_P4 66
+#define SONYPI_EVENT_PKEY_P5 67
+#define SONYPI_EVENT_SETTINGKEY_PRESSED 68
+#define SONYPI_EVENT_VOLUME_INC_PRESSED 69
+#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70
+#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71
/* get/set brightness */
#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/ad7879.h b/include/linux/spi/ad7879.h
new file mode 100644
index 00000000000..4231104c9af
--- /dev/null
+++ b/include/linux/spi/ad7879.h
@@ -0,0 +1,35 @@
+/* linux/spi/ad7879.h */
+
+/* Touchscreen characteristics vary between boards and models. The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+struct ad7879_platform_data {
+ u16 model; /* 7879 */
+ u16 x_plate_ohms;
+ u16 x_min, x_max;
+ u16 y_min, y_max;
+ u16 pressure_min, pressure_max;
+
+ /* [0..255] 0=OFF Starts at 1=550us and goes
+ * all the way to 9.440ms in steps of 35us.
+ */
+ u8 pen_down_acc_interval;
+ /* [0..15] Starts at 0=128us and goes all the
+ * way to 4.096ms in steps of 128us.
+ */
+ u8 first_conversion_delay;
+ /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
+ u8 acquisition_time;
+ /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */
+ u8 averaging;
+ /* [0..3] Perform X measurements 0 = OFF,
+ * 1 = 4, 2 = 8, 3 = 16 (median > averaging)
+ */
+ u8 median;
+ /* 1 = AUX/VBAT/GPIO set to GPIO Output */
+ u8 gpio_output;
+ /* Initial GPIO pin state (valid if gpio_output = 1) */
+ u8 gpio_default;
+};
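An illustrative board-data block for this driver; every value below is made up to match the ranges documented above, not a recommendation for real hardware. A board file would typically hand it over through spi_board_info.platform_data:

static const struct ad7879_platform_data my_ad7879_data = {
	.model			= 7879,
	.x_plate_ohms		= 620,
	.x_max			= 4095,
	.y_max			= 4095,
	.pressure_max		= 10000,
	.pen_down_acc_interval	= 1,	/* 550us */
	.first_conversion_delay	= 3,	/* 512us */
	.acquisition_time	= 1,	/* 4us */
	.averaging		= 1,	/* average 4 samples */
	.median			= 2,	/* 8 measurements */
};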
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 05eab2f11e6..51948eb6927 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -17,6 +17,7 @@ struct ads7846_platform_data {
u16 vref_mv; /* external vref value, milliVolts */
bool keep_vref_on; /* set to keep vref on for differential
* measurements as well */
+ bool swap_xy; /* swap x and y axes */
/* Settling time of the analog signals; a function of Vcc and the
* capacitance on the X/Y drivers. If set to non-zero, two samples
@@ -51,5 +52,6 @@ struct ads7846_platform_data {
void **filter_data);
int (*filter) (void *filter_data, int data_idx, int *val);
void (*filter_cleanup)(void *filter_data);
+ void (*wait_for_sync)(void);
};
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index 1085212c446..306e7b1c69e 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_SPI_EEPROM_H
#define __LINUX_SPI_EEPROM_H
+#include <linux/memory.h>
+
/*
* Put one of these structures in platform_data for SPI EEPROMS handled
* by the "at25" driver. On SPI, most EEPROMS understand the same core
@@ -17,6 +19,10 @@ struct spi_eeprom {
#define EE_ADDR2 0x0002 /* 16 bit addrs */
#define EE_ADDR3 0x0004 /* 24 bit addrs */
#define EE_READONLY 0x0008 /* disallow writes */
+
+ /* for exporting this chip's data to other kernel code */
+ void (*setup)(struct memory_accessor *mem, void *context);
+ void *context;
};
#endif /* __LINUX_SPI_EEPROM_H */
diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h
new file mode 100644
index 00000000000..1b5d5384fcd
--- /dev/null
+++ b/include/linux/spi/libertas_spi.h
@@ -0,0 +1,29 @@
+/*
+ * board-specific data for the libertas_spi driver.
+ *
+ * Copyright 2008 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+#ifndef _LIBERTAS_SPI_H_
+#define _LIBERTAS_SPI_H_
+
+struct spi_device;
+
+struct libertas_spi_platform_data {
+ /* There are two ways to read data from the WLAN module's SPI
+ * interface. Setting 0 or 1 here controls which one is used.
+ *
+ * Usually you want to set use_dummy_writes = 1.
+ * However, if that doesn't work or if you are using a slow SPI clock
+ * speed, you may want to use 0 here. */
+ u16 use_dummy_writes;
+
+ /* Board specific setup/teardown */
+ int (*setup)(struct spi_device *spi);
+ int (*teardown)(struct spi_device *spi);
+};
+#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 68bb1c501d0..9c4cd27f468 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -204,6 +204,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* SPI slaves, and are numbered from zero to num_chipselects.
* each slave has a chipselect signal, but it's common that not
* every chipselect is connected to a slave.
+ * @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
@@ -239,7 +240,20 @@ struct spi_master {
*/
u16 num_chipselect;
- /* setup mode and clock, etc (spi driver may call many times) */
+ /* some SPI controllers pose alignment requirements on DMAable
+ * buffers; let protocol drivers know about these requirements.
+ */
+ u16 dma_alignment;
+
+ /* spi_device.mode flags understood by this controller driver */
+ u16 mode_bits;
+
+ /* Setup mode and clock, etc (spi driver may call many times).
+ *
+ * IMPORTANT: this may be called when transfers to another
+ * device are active. DO NOT UPDATE SHARED REGISTERS in ways
+ * which could break those transfers.
+ */
int (*setup)(struct spi_device *spi);
/* bidirectional bulk transfers
@@ -512,30 +526,7 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-/**
- * spi_setup - setup SPI mode and clock rate
- * @spi: the device whose settings are being modified
- * Context: can sleep, and no requests are queued to the device
- *
- * SPI protocol drivers may need to update the transfer mode if the
- * device doesn't work with its default. They may likewise need
- * to update clock rates or word sizes from initial values. This function
- * changes those settings, and must be called from a context that can sleep.
- * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
- * effect the next time the device is selected and data is transferred to
- * or from it. When this function returns, the spi device is deselected.
- *
- * Note that this call will fail if the protocol driver specifies an option
- * that the underlying controller or its driver does not support. For
- * example, not all hardware supports wire transfers using nine bit words,
- * LSB-first wire encoding, or active-high chipselects.
- */
-static inline int
-spi_setup(struct spi_device *spi)
-{
- return spi->master->setup(spi);
-}
-
+extern int spi_setup(struct spi_device *spi);
/**
* spi_async - asynchronous SPI transfer
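With spi_setup() now out of line, protocol drivers keep calling it the same way; presumably the core can also validate the requested mode against the controller's new mode_bits before invoking master->setup(). A sketch of the usual probe-time call (driver name hypothetical):

static int my_spi_probe(struct spi_device *spi)
{
	int ret;

	spi->mode = SPI_MODE_3;		/* CPOL=1, CPHA=1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;

	ret = spi_setup(spi);
	if (ret < 0)
		return ret;		/* mode or word size not supported */

	return 0;
}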
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index 0f01a0f1f40..ca6782ee4b9 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -25,10 +25,16 @@
* ...
* };
*
+ * If chipselect is not used (there's only one device on the bus), assign
+ * SPI_GPIO_NO_CHIPSELECT to the controller_data:
+ * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT;
+ *
* If the bitbanged bus is later switched to a "native" controller,
* that platform_device and controller_data should be removed.
*/
+#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l)
+
/**
* struct spi_gpio_platform_data - parameter for bitbanged SPI master
* @sck: number of the GPIO used for clock output
diff --git a/include/linux/spi/wl12xx.h b/include/linux/spi/wl12xx.h
new file mode 100644
index 00000000000..11430cab2aa
--- /dev/null
+++ b/include/linux/spi/wl12xx.h
@@ -0,0 +1,31 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Kalle Valo <kalle.valo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _LINUX_SPI_WL12XX_H
+#define _LINUX_SPI_WL12XX_H
+
+struct wl12xx_platform_data {
+ void (*set_power)(bool enable);
+};
+
+#endif
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a0c66a2e00a..252b245cfcf 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -153,9 +153,11 @@ do { \
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
extern void _raw_read_lock(rwlock_t *lock);
+#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
extern int _raw_read_trylock(rwlock_t *lock);
extern void _raw_read_unlock(rwlock_t *lock);
extern void _raw_write_lock(rwlock_t *lock);
+#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
extern int _raw_write_trylock(rwlock_t *lock);
extern void _raw_write_unlock(rwlock_t *lock);
#else
@@ -165,9 +167,13 @@ do { \
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_lock_flags(lock, flags) \
+ __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_write_lock_flags(lock, flags) \
+ __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
#endif
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 938234c4a99..d4841ed8215 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
#define __raw_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
+# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 528dcb93c2f..18e7c7c0cae 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
#include <linux/pipe_fs_i.h>
/*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
*/
#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
@@ -36,6 +35,8 @@ struct splice_desc {
void *data; /* cookie */
} u;
loff_t pos; /* file position */
+ size_t num_spliced; /* number of bytes already spliced */
+ bool need_wakeup; /* need to wake up writer */
};
struct partial_page {
@@ -66,6 +67,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
splice_actor *);
extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
struct splice_desc *, splice_actor *);
+extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *,
+ splice_actor *);
+extern int splice_from_pipe_next(struct pipe_inode_info *,
+ struct splice_desc *);
+extern void splice_from_pipe_begin(struct splice_desc *);
+extern void splice_from_pipe_end(struct pipe_inode_info *,
+ struct splice_desc *);
+extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *,
+ struct splice_desc *);
+
extern ssize_t splice_to_pipe(struct pipe_inode_info *,
struct splice_pipe_desc *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
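A sketch of the loop the new helpers are meant to support, so a filesystem can open-code what splice_from_pipe() does and take its own locks between the "next" and "feed" steps. The pipe is assumed to be locked by the caller and sd already initialised; the function name is hypothetical:

static ssize_t my_splice_write_loop(struct pipe_inode_info *pipe,
				    struct splice_desc *sd)
{
	ssize_t ret;

	splice_from_pipe_begin(sd);
	do {
		ret = splice_from_pipe_next(pipe, sd);	/* wait for buffers */
		if (ret <= 0)
			break;
		/* filesystem-specific locking could go here */
		ret = splice_from_pipe_feed(pipe, sd, pipe_to_file);
	} while (ret > 0);
	splice_from_pipe_end(pipe, sd);

	return sd->num_spliced ? sd->num_spliced : ret;
}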
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 17d9b58f637..5ae8fa22d33 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -339,6 +339,10 @@ extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
extern void ssb_bus_unregister(struct ssb_bus *bus);
+/* Set a fallback SPROM.
+ * See kdoc at the function definition for complete documentation. */
+extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom);
+
/* Suspend a SSB bus.
* Call this from the parent bus suspend routine. */
extern int ssb_bus_suspend(struct ssb_bus *bus);
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 7d7e03dcf77..d3b1d18922f 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -181,6 +181,16 @@
#define SSB_CHIPCO_PROG_WAITCNT 0x0124
#define SSB_CHIPCO_FLASH_CFG 0x0128
#define SSB_CHIPCO_FLASH_WAITCNT 0x012C
+#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */
+#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */
+#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */
+#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */
+#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */
+#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
+#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
+#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00010000 /* HT available */
+#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00020000 /* ALP available */
+#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
#define SSB_CHIPCO_UART0_DATA 0x0300
#define SSB_CHIPCO_UART0_IMR 0x0304
#define SSB_CHIPCO_UART0_FCR 0x0308
@@ -197,6 +207,196 @@
#define SSB_CHIPCO_UART1_LSR 0x0414
#define SSB_CHIPCO_UART1_MSR 0x0418
#define SSB_CHIPCO_UART1_SCRATCH 0x041C
+/* PMU registers (rev >= 20) */
+#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */
+#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
+#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16
+#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
+#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
+#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
+#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */
+#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2
+#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */
+#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */
+#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */
+#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */
+#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */
+#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */
+#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */
+#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */
+#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */
+#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */
+#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */
+#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */
+#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */
+#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */
+#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */
+#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */
+#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */
+#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */
+#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */
+#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */
+#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */
+#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
+#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */
+#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */
+#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650
+#define SSB_CHIPCO_CHIPCTL_DATA 0x0654
+#define SSB_CHIPCO_REGCTL_ADDR 0x0658
+#define SSB_CHIPCO_REGCTL_DATA 0x065C
+#define SSB_CHIPCO_PLLCTL_ADDR 0x0660
+#define SSB_CHIPCO_PLLCTL_DATA 0x0664
+
+
+
+/** PMU PLL registers */
+
+/* PMU rev 0 PLL registers */
+#define SSB_PMU0_PLLCTL0 0
+#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001
+#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */
+#define SSB_PMU0_PLLCTL1 1
+#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */
+#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28
+#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */
+#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8
+#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */
+#define SSB_PMU0_PLLCTL2 2
+#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */
+#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0
+
+/* PMU rev 1 PLL registers */
+#define SSB_PMU1_PLLCTL0 0
+#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */
+#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20
+#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */
+#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24
+#define SSB_PMU1_PLLCTL1 1
+#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */
+#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0
+#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */
+#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8
+#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */
+#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16
+#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */
+#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24
+#define SSB_PMU1_PLLCTL2 2
+#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */
+#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0
+#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */
+#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8
+#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */
+#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17
+#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */
+#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20
+#define SSB_PMU1_PLLCTL3 3
+#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */
+#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0
+#define SSB_PMU1_PLLCTL4 4
+#define SSB_PMU1_PLLCTL5 5
+#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */
+#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8
+
+/* BCM4312 PLL resource numbers. */
+#define SSB_PMURES_4312_SWITCHER_BURST 0
+#define SSB_PMURES_4312_SWITCHER_PWM 1
+#define SSB_PMURES_4312_PA_REF_LDO 2
+#define SSB_PMURES_4312_CORE_LDO_BURST 3
+#define SSB_PMURES_4312_CORE_LDO_PWM 4
+#define SSB_PMURES_4312_RADIO_LDO 5
+#define SSB_PMURES_4312_ILP_REQUEST 6
+#define SSB_PMURES_4312_BG_FILTBYP 7
+#define SSB_PMURES_4312_TX_FILTBYP 8
+#define SSB_PMURES_4312_RX_FILTBYP 9
+#define SSB_PMURES_4312_XTAL_PU 10
+#define SSB_PMURES_4312_ALP_AVAIL 11
+#define SSB_PMURES_4312_BB_PLL_FILTBYP 12
+#define SSB_PMURES_4312_RF_PLL_FILTBYP 13
+#define SSB_PMURES_4312_HT_AVAIL 14
+
+/* BCM4325 PLL resource numbers. */
+#define SSB_PMURES_4325_BUCK_BOOST_BURST 0
+#define SSB_PMURES_4325_CBUCK_BURST 1
+#define SSB_PMURES_4325_CBUCK_PWM 2
+#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3
+#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4
+#define SSB_PMURES_4325_BUCK_BOOST_PWM 5
+#define SSB_PMURES_4325_ILP_REQUEST 6
+#define SSB_PMURES_4325_ABUCK_BURST 7
+#define SSB_PMURES_4325_ABUCK_PWM 8
+#define SSB_PMURES_4325_LNLDO1_PU 9
+#define SSB_PMURES_4325_LNLDO2_PU 10
+#define SSB_PMURES_4325_LNLDO3_PU 11
+#define SSB_PMURES_4325_LNLDO4_PU 12
+#define SSB_PMURES_4325_XTAL_PU 13
+#define SSB_PMURES_4325_ALP_AVAIL 14
+#define SSB_PMURES_4325_RX_PWRSW_PU 15
+#define SSB_PMURES_4325_TX_PWRSW_PU 16
+#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17
+#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18
+#define SSB_PMURES_4325_AFE_PWRSW_PU 19
+#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20
+#define SSB_PMURES_4325_HT_AVAIL 21
+
+/* BCM4328 PLL resource numbers. */
+#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0
+#define SSB_PMURES_4328_BB_SWITCHER_PWM 1
+#define SSB_PMURES_4328_BB_SWITCHER_BURST 2
+#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3
+#define SSB_PMURES_4328_ILP_REQUEST 4
+#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5
+#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6
+#define SSB_PMURES_4328_ROM_SWITCH 7
+#define SSB_PMURES_4328_PA_REF_LDO 8
+#define SSB_PMURES_4328_RADIO_LDO 9
+#define SSB_PMURES_4328_AFE_LDO 10
+#define SSB_PMURES_4328_PLL_LDO 11
+#define SSB_PMURES_4328_BG_FILTBYP 12
+#define SSB_PMURES_4328_TX_FILTBYP 13
+#define SSB_PMURES_4328_RX_FILTBYP 14
+#define SSB_PMURES_4328_XTAL_PU 15
+#define SSB_PMURES_4328_XTAL_EN 16
+#define SSB_PMURES_4328_BB_PLL_FILTBYP 17
+#define SSB_PMURES_4328_RF_PLL_FILTBYP 18
+#define SSB_PMURES_4328_BB_PLL_PU 19
+
+/* BCM5354 PLL resource numbers. */
+#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0
+#define SSB_PMURES_5354_BB_SWITCHER_PWM 1
+#define SSB_PMURES_5354_BB_SWITCHER_BURST 2
+#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3
+#define SSB_PMURES_5354_ILP_REQUEST 4
+#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5
+#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6
+#define SSB_PMURES_5354_ROM_SWITCH 7
+#define SSB_PMURES_5354_PA_REF_LDO 8
+#define SSB_PMURES_5354_RADIO_LDO 9
+#define SSB_PMURES_5354_AFE_LDO 10
+#define SSB_PMURES_5354_PLL_LDO 11
+#define SSB_PMURES_5354_BG_FILTBYP 12
+#define SSB_PMURES_5354_TX_FILTBYP 13
+#define SSB_PMURES_5354_RX_FILTBYP 14
+#define SSB_PMURES_5354_XTAL_PU 15
+#define SSB_PMURES_5354_XTAL_EN 16
+#define SSB_PMURES_5354_BB_PLL_FILTBYP 17
+#define SSB_PMURES_5354_RF_PLL_FILTBYP 18
+#define SSB_PMURES_5354_BB_PLL_PU 19
+
+
+
+/** Chip specific Chip-Status register contents. */
+#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
+#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */
+#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
+#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004
+#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2
+#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008
+#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3
+#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0
+#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
+#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for 2a */
@@ -353,11 +553,20 @@
struct ssb_device;
struct ssb_serial_port;
+/* Data for the PMU, if available.
+ * Check availability with (cc->capabilities & SSB_CHIPCO_CAP_PMU),
+ * where cc is the bus's struct ssb_chipcommon.
+ */
+struct ssb_chipcommon_pmu {
+ u8 rev; /* PMU revision */
+ u32 crystalfreq; /* The active crystal frequency (in kHz) */
+};
+
struct ssb_chipcommon {
struct ssb_device *dev;
u32 capabilities;
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
+ struct ssb_chipcommon_pmu pmu;
};
static inline bool ssb_chipco_available(struct ssb_chipcommon *cc)
@@ -365,6 +574,17 @@ static inline bool ssb_chipco_available(struct ssb_chipcommon *cc)
return (cc->dev != NULL);
}
+/* Register access */
+#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset)
+#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val)
+
+#define chipco_mask32(cc, offset, mask) \
+ chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask))
+#define chipco_set32(cc, offset, set) \
+ chipco_write32(cc, offset, chipco_read32(cc, offset) | (set))
+#define chipco_maskset32(cc, offset, mask, set) \
+ chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set))
+
extern void ssb_chipcommon_init(struct ssb_chipcommon *cc);
extern void ssb_chipco_suspend(struct ssb_chipcommon *cc);
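An illustrative use of the new register accessors, built on the clock-control bits defined earlier in this header; whether a given chip wants the ALP clock forced at all is device specific, and the function name is made up:

static void my_force_alp(struct ssb_chipcommon *cc, bool on)
{
	if (on)
		chipco_set32(cc, SSB_CHIPCO_CLKCTLST,
			     SSB_CHIPCO_CLKCTLST_FORCEALP);
	else
		chipco_mask32(cc, SSB_CHIPCO_CLKCTLST,
			      ~SSB_CHIPCO_CLKCTLST_FORCEALP);
}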
@@ -406,4 +626,8 @@ extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
struct ssb_serial_port *ports);
#endif /* CONFIG_SSB_SERIAL */
+/* PMU support */
+extern void ssb_pmu_init(struct ssb_chipcommon *cc);
+
+
#endif /* LINUX_SSB_CHIPCO_H_ */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index 99a0f991e85..a01b982b578 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -326,6 +326,42 @@
#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
#define SSB_SPROM5_GPIOB_P3_SHIFT 8
+/* SPROM Revision 8 */
+#define SSB_SPROM8_BFLLO 0x1084 /* Boardflags (low 16 bits) */
+#define SSB_SPROM8_BFLHI 0x1086 /* Boardflags Hi */
+#define SSB_SPROM8_IL0MAC 0x108C /* 6 byte MAC address */
+#define SSB_SPROM8_CCODE 0x1092 /* 2 byte country code */
+#define SSB_SPROM8_ANTAVAIL 0x109C /* Antenna available bitfields */
+#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
+#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
+#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
+#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0
+#define SSB_SPROM8_AGAIN01 0x109E /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */
+#define SSB_SPROM8_AGAIN0_SHIFT 0
+#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */
+#define SSB_SPROM8_AGAIN1_SHIFT 8
+#define SSB_SPROM8_AGAIN23 0x10A0
+#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */
+#define SSB_SPROM8_AGAIN2_SHIFT 0
+#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
+#define SSB_SPROM8_AGAIN3_SHIFT 8
+#define SSB_SPROM8_GPIOA 0x1096 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM8_GPIOA_P1_SHIFT 8
+#define SSB_SPROM8_GPIOB 0x1098 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM8_GPIOB_P3_SHIFT 8
+#define SSB_SPROM8_MAXP_BG 0x10C0 /* Max Power BG in path 1 */
+#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */
+#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
+#define SSB_SPROM8_ITSSI_BG_SHIFT 8
+#define SSB_SPROM8_MAXP_A 0x10C8 /* Max Power A in path 1 */
+#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power A */
+#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
+#define SSB_SPROM8_ITSSI_A_SHIFT 8
/* Values for SSB_SPROM1_BINF_CCODE */
enum {
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644
index 00000000000..6f3e54c704c
--- /dev/null
+++ b/include/linux/stackprotector.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1a8cecc4f38..51efbef38fb 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -4,6 +4,8 @@
struct task_struct;
#ifdef CONFIG_STACKTRACE
+struct task_struct;
+
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
@@ -11,6 +13,7 @@ struct stack_trace {
};
extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
diff --git a/include/linux/string.h b/include/linux/string.h
index d18fc198aa2..489019ef169 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,8 +10,10 @@
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
+#include <stdarg.h>
extern char *strndup_user(const char __user *, long);
+extern void *memdup_user(const void __user *, size_t);
/*
* Include machine specific inline routines
@@ -111,8 +113,23 @@ extern void argv_free(char **argv);
extern bool sysfs_streq(const char *s1, const char *s2);
+#ifdef CONFIG_BINARY_PRINTF
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+#endif
+
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
+/**
+ * strstarts - does @str start with @prefix?
+ * @str: string to examine
+ * @prefix: prefix to look for.
+ */
+static inline bool strstarts(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
#endif
#endif /* _LINUX_STRING_H_ */
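A sketch of the binary printf pair (only built with CONFIG_BINARY_PRINTF): arguments are packed into a u32 buffer in the hot path and rendered to text later with the same format string, which is what makes deferred formatting cheap for tracing. The demo function is hypothetical; note the pack-side size is given in 32-bit words:

static void my_bprintf_demo(void)
{
	u32 bin[64];
	char txt[128];

	/* pack: cheap, done where the event happens */
	bprintf(bin, ARRAY_SIZE(bin), "irq %d on %s", 42, "eth0");

	/* unpack: done later, e.g. when a trace buffer is read */
	bstr_printf(txt, sizeof(txt), "irq %d on %s", bin);
}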
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
index 0b4388356c8..841cec8ed52 100644
--- a/include/linux/stringify.h
+++ b/include/linux/stringify.h
@@ -6,7 +6,7 @@
* converts to "bar".
*/
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
#endif /* !__LINUX_STRINGIFY_H */
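Why the variadic form matters, in one small illustration: an argument whose own expansion contains a comma used to break the single-parameter version, while the new form stringifies the whole list. MY_REG_PAIR is hypothetical:

#define MY_REG_PAIR	r2, r3	/* expands to two comma-separated tokens */

/* with the old one-argument __stringify() this was a build error
 * ("macro passed 2 arguments"); now it yields the string "r2, r3" */
static const char *my_pair = __stringify(MY_REG_PAIR);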
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
new file mode 100644
index 00000000000..6508f0dc0ef
--- /dev/null
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions to create and manage the backchannel
+ */
+
+#ifndef _LINUX_SUNRPC_BC_XPRT_H
+#define _LINUX_SUNRPC_BC_XPRT_H
+
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+
+#ifdef CONFIG_NFS_V4_1
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
+void xprt_free_bc_request(struct rpc_rqst *req);
+int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
+void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs);
+void bc_release_request(struct rpc_task *);
+int bc_send(struct rpc_rqst *req);
+#else /* CONFIG_NFS_V4_1 */
+static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
+ unsigned int min_reqs)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+#endif /* _LINUX_SUNRPC_BC_XPRT_H */
+
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index c39a21040dc..37881f1a0bd 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -143,6 +143,7 @@ int rpc_call_sync(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
+void rpc_restart_call_prepare(struct rpc_task *);
void rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 64981a2f1ca..401097781fc 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -210,6 +210,8 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
@@ -237,6 +239,7 @@ void rpc_show_tasks(void);
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
+void rpc_prepare_task(struct rpc_task *task);
static inline void rpc_exit(struct rpc_task *task, int status)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 3435d24bfe5..ea8009695c6 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -24,6 +24,15 @@
*/
typedef int (*svc_thread_fn)(void *);
+/* statistics for svc_pool structures */
+struct svc_pool_stats {
+ unsigned long packets;
+ unsigned long sockets_queued;
+ unsigned long threads_woken;
+ unsigned long overloads_avoided;
+ unsigned long threads_timedout;
+};
+
/*
*
* RPC service thread pool.
@@ -41,6 +50,8 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
+ int sp_nwaking; /* number of threads woken but not yet active */
+ struct svc_pool_stats sp_stats; /* statistics on pool operation */
} ____cacheline_aligned_in_smp;
/*
@@ -69,7 +80,6 @@ struct svc_serv {
struct list_head sv_tempsocks; /* all temporary sockets */
int sv_tmpcnt; /* count of temporary sockets */
struct timer_list sv_temptimer; /* timer for aging temporary sockets */
- sa_family_t sv_family; /* listener's address family */
char * sv_name; /* service name */
@@ -84,6 +94,17 @@ struct svc_serv {
struct module * sv_module; /* optional module to count when
* adding threads */
svc_thread_fn sv_function; /* main function for threads */
+ unsigned int sv_drc_max_pages; /* Total pages for DRC */
+ unsigned int sv_drc_pages_used;/* DRC pages used */
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head sv_cb_list; /* queue for callback requests
+ * that arrive over the same
+ * connection */
+ spinlock_t sv_cb_lock; /* protects the svc_cb_list */
+ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
+ * entries in the svc_cb_list */
+ struct svc_xprt *bc_xprt;
+#endif /* CONFIG_NFS_V4_1 */
};
/*
@@ -219,6 +240,7 @@ struct svc_rqst {
struct svc_cred rq_cred; /* auth info */
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
+ int rq_usedeferral; /* use deferral */
size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
@@ -264,6 +286,7 @@ struct svc_rqst {
* cache pages */
wait_queue_head_t rq_wait; /* synchronization */
struct task_struct *rq_task; /* service thread */
+ int rq_waking; /* 1 if thread is being woken */
};
/*
@@ -385,19 +408,22 @@ struct svc_procedure {
/*
* Function prototypes.
*/
-struct svc_serv *svc_create(struct svc_program *, unsigned int, sa_family_t,
+struct svc_serv *svc_create(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *));
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
struct svc_pool *pool);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- sa_family_t, void (*shutdown)(struct svc_serv *),
+ void (*shutdown)(struct svc_serv *),
svc_thread_fn, struct module *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
int svc_process(struct svc_rqst *);
-int svc_register(const struct svc_serv *, const unsigned short,
- const unsigned short);
+int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
+ struct svc_rqst *);
+int svc_register(const struct svc_serv *, const int,
+ const unsigned short, const unsigned short);
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0127daca435..2223ae0b5ed 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -71,7 +71,8 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
-int svc_create_xprt(struct svc_serv *, char *, unsigned short, int);
+int svc_create_xprt(struct svc_serv *, const char *, const int,
+ const unsigned short, int);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_received(struct svc_xprt *);
void svc_xprt_put(struct svc_xprt *xprt);
@@ -80,40 +81,44 @@ void svc_close_xprt(struct svc_xprt *xprt);
void svc_delete_xprt(struct svc_xprt *xprt);
int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
-struct svc_xprt *svc_find_xprt(struct svc_serv *, char *, int, int);
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen);
+struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
+ const sa_family_t af, const unsigned short port);
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
static inline void svc_xprt_get(struct svc_xprt *xprt)
{
kref_get(&xprt->xpt_ref);
}
static inline void svc_xprt_set_local(struct svc_xprt *xprt,
- struct sockaddr *sa, int salen)
+ const struct sockaddr *sa,
+ const size_t salen)
{
memcpy(&xprt->xpt_local, sa, salen);
xprt->xpt_locallen = salen;
}
static inline void svc_xprt_set_remote(struct svc_xprt *xprt,
- struct sockaddr *sa, int salen)
+ const struct sockaddr *sa,
+ const size_t salen)
{
memcpy(&xprt->xpt_remote, sa, salen);
xprt->xpt_remotelen = salen;
}
-static inline unsigned short svc_addr_port(struct sockaddr *sa)
+static inline unsigned short svc_addr_port(const struct sockaddr *sa)
{
- unsigned short ret = 0;
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;
+
switch (sa->sa_family) {
case AF_INET:
- ret = ntohs(((struct sockaddr_in *)sa)->sin_port);
- break;
+ return ntohs(sin->sin_port);
case AF_INET6:
- ret = ntohs(((struct sockaddr_in6 *)sa)->sin6_port);
- break;
+ return ntohs(sin6->sin6_port);
}
- return ret;
+
+ return 0;
}
-static inline size_t svc_addr_len(struct sockaddr *sa)
+static inline size_t svc_addr_len(const struct sockaddr *sa)
{
switch (sa->sa_family) {
case AF_INET:
@@ -121,39 +126,43 @@ static inline size_t svc_addr_len(struct sockaddr *sa)
case AF_INET6:
return sizeof(struct sockaddr_in6);
}
- return -EAFNOSUPPORT;
+
+ return 0;
}
-static inline unsigned short svc_xprt_local_port(struct svc_xprt *xprt)
+static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
{
- return svc_addr_port((struct sockaddr *)&xprt->xpt_local);
+ return svc_addr_port((const struct sockaddr *)&xprt->xpt_local);
}
-static inline unsigned short svc_xprt_remote_port(struct svc_xprt *xprt)
+static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt)
{
- return svc_addr_port((struct sockaddr *)&xprt->xpt_remote);
+ return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote);
}
-static inline char *__svc_print_addr(struct sockaddr *addr,
- char *buf, size_t len)
+static inline char *__svc_print_addr(const struct sockaddr *addr,
+ char *buf, const size_t len)
{
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)addr;
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr;
+
switch (addr->sa_family) {
case AF_INET:
- snprintf(buf, len, "%pI4, port=%u",
- &((struct sockaddr_in *)addr)->sin_addr,
- ntohs(((struct sockaddr_in *) addr)->sin_port));
+ snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr,
+ ntohs(sin->sin_port));
break;
case AF_INET6:
snprintf(buf, len, "%pI6, port=%u",
- &((struct sockaddr_in6 *)addr)->sin6_addr,
- ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
+ &sin6->sin6_addr,
+ ntohs(sin6->sin6_port));
break;
default:
snprintf(buf, len, "unknown address type: %d", addr->sa_family);
break;
}
+
return buf;
}
#endif /* SUNRPC_SVC_XPRT_H */
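With the const-qualified helpers a caller can format a transport's bound address without casting away qualifiers. A small usage sketch (the buffer size and log text are arbitrary):

	/* Illustrative only: log the local address and port of a server transport. */
	static void example_log_xprt(const struct svc_xprt *xprt)
	{
		char buf[64];

		__svc_print_addr((const struct sockaddr *)&xprt->xpt_local,
				 buf, sizeof(buf));
		printk(KERN_INFO "listening on %s (local port %u)\n",
		       buf, svc_xprt_local_port(xprt));
	}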
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 483e10380aa..04dba23c59f 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -38,10 +38,15 @@ int svc_recv(struct svc_rqst *, long);
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
-int svc_sock_names(char *buf, struct svc_serv *serv, char *toclose);
-int svc_addsock(struct svc_serv *serv, int fd, char *name_return);
+int svc_sock_names(struct svc_serv *serv, char *buf,
+ const size_t buflen,
+ const char *toclose);
+int svc_addsock(struct svc_serv *serv, const int fd,
+ char *name_return, const size_t len);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 49e1eb45446..d8910b68e1b 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -69,27 +69,27 @@ struct xdr_buf {
* pre-xdr'ed macros.
*/
-#define xdr_zero __constant_htonl(0)
-#define xdr_one __constant_htonl(1)
-#define xdr_two __constant_htonl(2)
-
-#define rpc_success __constant_htonl(RPC_SUCCESS)
-#define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL)
-#define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH)
-#define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL)
-#define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS)
-#define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR)
-#define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY)
-
-#define rpc_auth_ok __constant_htonl(RPC_AUTH_OK)
-#define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED)
-#define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED)
-#define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF)
-#define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF)
-#define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK)
-#define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM)
-#define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM)
-#define rpc_autherr_oldseqnum __constant_htonl(101)
+#define xdr_zero cpu_to_be32(0)
+#define xdr_one cpu_to_be32(1)
+#define xdr_two cpu_to_be32(2)
+
+#define rpc_success cpu_to_be32(RPC_SUCCESS)
+#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
+#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
+#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL)
+#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS)
+#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
+#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
+
+#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
+#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
+#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
+#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
+#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
+#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
+#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
+#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
+#define rpc_autherr_oldseqnum cpu_to_be32(101)
/*
* Miscellaneous XDR helper functions
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 11fc71d50c1..1175d58efc2 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -67,7 +67,8 @@ struct rpc_rqst {
struct rpc_task * rq_task; /* RPC task data */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
- int rq_received; /* receive completed */
+ int rq_reply_bytes_recvd; /* number of reply */
+ /* bytes received */
u32 rq_seqno; /* gss seq no. used on req. */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
@@ -97,6 +98,12 @@ struct rpc_rqst {
unsigned long rq_xtime; /* when transmitted */
int rq_ntrans;
+
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head rq_bc_list; /* Callback service list */
+ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
+ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
+#endif /* CONFIG_NFS_V4_1 */
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
@@ -174,6 +181,15 @@ struct rpc_xprt {
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+#if defined(CONFIG_NFS_V4_1)
+ struct svc_serv *bc_serv; /* The RPC service which will */
+ /* process the callback */
+ unsigned int bc_alloc_count; /* Total number of preallocs */
+ spinlock_t bc_pa_lock; /* Protects the preallocated
+ * items */
+ struct list_head bc_pa_list; /* List of preallocated
+ * backchannel rpc_rqst's */
+#endif /* CONFIG_NFS_V4_1 */
struct list_head recv;
struct {
@@ -192,6 +208,26 @@ struct rpc_xprt {
const char *address_strings[RPC_DISPLAY_MAX];
};
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Backchannel flags
+ */
+#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
+ /* buffer in use */
+#endif /* CONFIG_NFS_V4_1 */
+
+#if defined(CONFIG_NFS_V4_1)
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+}
+#else
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
struct xprt_create {
int ident; /* XPRT_TRANSPORT identifier */
struct sockaddr * srcaddr; /* optional local address */
@@ -235,6 +271,7 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
*/
int xprt_register_transport(struct xprt_class *type);
int xprt_unregister_transport(struct xprt_class *type);
+int xprt_load_transport(const char *);
void xprt_set_retrans_timeout_def(struct rpc_task *task);
void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
@@ -259,6 +296,8 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
+#define XPRT_CONNECTION_ABORT (7)
+#define XPRT_CONNECTION_CLOSE (8)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index c7d9bb1832b..cd15df6c63c 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,9 +1,6 @@
#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H
-#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
-#include <asm/suspend.h>
-#endif
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
@@ -61,10 +58,17 @@ typedef int __bitwise suspend_state_t;
* by @begin().
* @prepare() is called right after devices have been suspended (ie. the
* appropriate .suspend() method has been executed for each device) and
- * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
- * This callback is optional. It returns 0 on success or a negative
- * error code otherwise, in which case the system cannot enter the desired
- * sleep state (@enter() and @finish() will not be called in that case).
+ * before device drivers' late suspend callbacks are executed. It returns
+ * 0 on success or a negative error code otherwise, in which case the
+ * system cannot enter the desired sleep state (@prepare_late(), @enter(),
+ * @wake(), and @finish() will not be called in that case).
+ *
+ * @prepare_late: Finish preparing the platform for entering the system sleep
+ * state indicated by @begin().
+ * @prepare_late is called before disabling nonboot CPUs and after
+ * device drivers' late suspend callbacks have been executed. It returns
+ * 0 on success or a negative error code otherwise, in which case the
+ * system cannot enter the desired sleep state (@enter() and @wake() will
+ * not be called in that case).
*
* @enter: Enter the system sleep state indicated by @begin() or represented by
* the argument if @begin() is not implemented.
@@ -72,19 +76,26 @@ typedef int __bitwise suspend_state_t;
* error code otherwise, in which case the system cannot enter the desired
* sleep state.
*
- * @finish: Called when the system has just left a sleep state, right after
- * the nonboot CPUs have been enabled and before devices are resumed (it is
- * executed with IRQs enabled).
+ * @wake: Called when the system has just left a sleep state, right after
+ * the nonboot CPUs have been enabled and before device drivers' early
+ * resume callbacks are executed.
+ * This callback is optional, but should be implemented by the platforms
+ * that implement @prepare_late(). If implemented, it is always called
+ * after @enter(), even if @enter() fails.
+ *
+ * @finish: Finish wake-up of the platform.
+ * @finish is called right before device drivers' regular resume
+ * callbacks are executed.
* This callback is optional, but should be implemented by the platforms
* that implement @prepare(). If implemented, it is always called after
- * @enter() (even if @enter() fails).
+ * @enter() and @wake(), if implemented, even if any of them fails.
*
* @end: Called by the PM core right after resuming devices, to indicate to
* the platform that the system has returned to the working state or
* the transition to the sleep state has been aborted.
* This callback is optional, but should be implemented by the platforms
- * that implement @begin(), but platforms implementing @begin() should
- * also provide a @end() which cleans up transitions aborted before
+ * that implement @begin(). Accordingly, platforms implementing @begin()
+ * should also provide a @end() which cleans up transitions aborted before
* @enter().
*
* @recover: Recover the platform from a suspend failure.
@@ -96,7 +107,9 @@ struct platform_suspend_ops {
int (*valid)(suspend_state_t state);
int (*begin)(suspend_state_t state);
int (*prepare)(void);
+ int (*prepare_late)(void);
int (*enter)(suspend_state_t state);
+ void (*wake)(void);
void (*finish)(void);
void (*end)(void);
void (*recover)(void);
@@ -232,11 +245,6 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
extern int hibernate(void);
-extern int hibernate_nvs_register(unsigned long start, unsigned long size);
-extern int hibernate_nvs_alloc(void);
-extern void hibernate_nvs_free(void);
-extern void hibernate_nvs_save(void);
-extern void hibernate_nvs_restore(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
@@ -245,6 +253,16 @@ static inline void swsusp_unset_page_free(struct page *p) {}
static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
+static inline bool system_entering_hibernation(void) { return false; }
+#endif /* CONFIG_HIBERNATION */
+
+#ifdef CONFIG_HIBERNATION_NVS
+extern int hibernate_nvs_register(unsigned long start, unsigned long size);
+extern int hibernate_nvs_alloc(void);
+extern void hibernate_nvs_free(void);
+extern void hibernate_nvs_save(void);
+extern void hibernate_nvs_restore(void);
+#else /* CONFIG_HIBERNATION_NVS */
static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
{
return 0;
@@ -253,8 +271,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; }
static inline void hibernate_nvs_free(void) {}
static inline void hibernate_nvs_save(void) {}
static inline void hibernate_nvs_restore(void) {}
-static inline bool system_entering_hibernation(void) { return false; }
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATION_NVS */
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
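Taken together, the documented ordering is: prepare() -> late suspend of devices -> prepare_late() -> nonboot CPUs offlined -> enter() -> nonboot CPUs onlined -> wake() -> early resume of devices -> finish() -> regular resume -> end(). A minimal, hypothetical ops table wiring up the new hooks (every callback body below is a placeholder):

	/* Hypothetical platform glue; all callbacks are empty placeholders. */
	static int example_begin(suspend_state_t state)		{ return 0; }
	static int example_prepare(void)			{ return 0; }
	static int example_prepare_late(void)			{ return 0; }
	static int example_enter(suspend_state_t state)		{ return 0; }
	static void example_wake(void)				{ }
	static void example_finish(void)			{ }
	static void example_end(void)				{ }

	static struct platform_suspend_ops example_suspend_ops = {
		.valid		= suspend_valid_only_mem,
		.begin		= example_begin,
		.prepare	= example_prepare,
		.prepare_late	= example_prepare_late,
		.enter		= example_enter,
		.wake		= example_wake,
		.finish		= example_finish,
		.end		= example_end,
	};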
diff --git a/include/linux/suspend_ioctls.h b/include/linux/suspend_ioctls.h
index 2c6faec96bd..0b30382984f 100644
--- a/include/linux/suspend_ioctls.h
+++ b/include/linux/suspend_ioctls.h
@@ -1,14 +1,15 @@
#ifndef _LINUX_SUSPEND_IOCTLS_H
#define _LINUX_SUSPEND_IOCTLS_H
+#include <linux/types.h>
/*
* This structure is used to pass the values needed for the identification
* of the resume swap area from user space to the kernel via the
* SNAPSHOT_SET_SWAP_AREA ioctl
*/
struct resume_swap_area {
- loff_t offset;
- u_int32_t dev;
+ __kernel_loff_t offset;
+ __u32 dev;
} __attribute__((packed));
#define SNAPSHOT_IOC_MAGIC '3'
@@ -20,13 +21,13 @@ struct resume_swap_area {
#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11)
#define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \
struct resume_swap_area)
-#define SNAPSHOT_GET_IMAGE_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 14, loff_t)
+#define SNAPSHOT_GET_IMAGE_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 14, __kernel_loff_t)
#define SNAPSHOT_PLATFORM_SUPPORT _IO(SNAPSHOT_IOC_MAGIC, 15)
#define SNAPSHOT_POWER_OFF _IO(SNAPSHOT_IOC_MAGIC, 16)
#define SNAPSHOT_CREATE_IMAGE _IOW(SNAPSHOT_IOC_MAGIC, 17, int)
#define SNAPSHOT_PREF_IMAGE_SIZE _IO(SNAPSHOT_IOC_MAGIC, 18)
-#define SNAPSHOT_AVAIL_SWAP_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 19, loff_t)
-#define SNAPSHOT_ALLOC_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 20, loff_t)
+#define SNAPSHOT_AVAIL_SWAP_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 19, __kernel_loff_t)
+#define SNAPSHOT_ALLOC_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 20, __kernel_loff_t)
#define SNAPSHOT_IOC_MAXNR 20
#endif /* _LINUX_SUSPEND_IOCTLS_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d3021557887..c88b36665f7 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -129,9 +129,10 @@ enum {
#define SWAP_CLUSTER_MAX 32
-#define SWAP_MAP_MAX 0x7fff
-#define SWAP_MAP_BAD 0x8000
-
+#define SWAP_MAP_MAX 0x7ffe
+#define SWAP_MAP_BAD 0x7fff
+#define SWAP_HAS_CACHE 0x8000 /* A swap cache page exists for this entry. */
+#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
/*
* The in-memory structure used to track swap areas.
*/
@@ -212,7 +213,7 @@ static inline void lru_cache_add_active_file(struct page *page)
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
- gfp_t gfp_mask);
+ gfp_t gfp_mask, nodemask_t *mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
unsigned int swappiness);
@@ -235,7 +236,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void scan_mapping_unevictable_pages(struct address_space *);
@@ -244,24 +244,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int page_evictable(struct page *page,
- struct vm_area_struct *vma)
-{
- return 1;
-}
-
-static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
-
-static inline int scan_unevictable_register_node(struct node *node)
-{
- return 0;
-}
-
-static inline void scan_unevictable_unregister_node(struct node *node) { }
-#endif
extern int kswapd_run(int nid);
@@ -274,7 +256,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
-extern int swap_readpage(struct file *, struct page *);
+extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_read(struct bio *bio, int err);
@@ -300,9 +282,11 @@ extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
-extern int swap_duplicate(swp_entry_t);
+extern void swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
+extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
@@ -335,10 +319,11 @@ static inline void disable_swap_token(void)
}
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent);
+extern void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
-mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif
@@ -370,18 +355,31 @@ static inline void show_swap_cache_info(void)
}
#define free_swap_and_cache(swp) is_migration_entry(swp)
-#define swap_duplicate(swp) is_migration_entry(swp)
+#define swapcache_prepare(swp) is_migration_entry(swp)
+
+static inline void swap_duplicate(swp_entry_t swp)
+{
+}
static inline void swap_free(swp_entry_t swp)
{
}
+static inline void swapcache_free(swp_entry_t swp, struct page *page)
+{
+}
+
static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
return NULL;
}
+static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+{
+ return 0;
+}
+
static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
return NULL;
@@ -426,10 +424,9 @@ static inline swp_entry_t get_swap_page(void)
#define has_swap_token(x) 0
#define disable_swap_token() do { } while(0)
-static inline int mem_cgroup_cache_charge_swapin(struct page *page,
- struct mm_struct *mm, gfp_t mask, bool locked)
+static inline void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
- return 0;
}
#endif /* CONFIG_SWAP */
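The new swap_map encoding keeps a 15-bit reference count (0x7fff is reserved as SWAP_MAP_BAD, so SWAP_MAP_MAX drops to 0x7ffe) and uses bit 15 as the "has swap cache" flag. A hedged sketch of how one such entry would be decoded (the helper names are invented):

	/* Illustrative decode of a swap_map entry under the new layout. */
	static inline unsigned short example_swap_count(unsigned short ent)
	{
		return ent & SWAP_COUNT_MASK;	/* bits 0-14: reference count */
	}

	static inline bool example_has_swapcache(unsigned short ent)
	{
		return ent & SWAP_HAS_CACHE;	/* bit 15: entry has a swap cache page */
	}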
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index dedd3c0cfe3..cb1a6631b8f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -29,9 +29,10 @@ extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
phys_addr_t address);
-extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
+ dma_addr_t address);
-extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -41,20 +42,13 @@ extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
-extern dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir);
-
-extern void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir);
-
-extern dma_addr_t
-swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
- int dir, struct dma_attrs *attrs);
-
-extern void
-swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir, struct dma_attrs *attrs);
+extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
@@ -66,36 +60,38 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- int dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, struct dma_attrs *attrs);
extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, int dir, struct dma_attrs *attrs);
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir);
+ size_t size, enum dma_data_direction dir);
extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir);
+ int nelems, enum dma_data_direction dir);
extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir);
+ size_t size, enum dma_data_direction dir);
extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir);
+ int nelems, enum dma_data_direction dir);
extern void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- unsigned long offset, size_t size, int dir);
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
extern void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
- int dir);
+ enum dma_data_direction dir);
extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 99b8bdb17b2..0ff2779c44d 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -125,6 +125,7 @@
#define MGSL_MODE_MONOSYNC 3
#define MGSL_MODE_BISYNC 4
#define MGSL_MODE_RAW 6
+#define MGSL_MODE_BASE_CLOCK 7
#define MGSL_BUS_TYPE_ISA 1
#define MGSL_BUS_TYPE_EISA 2
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f9f900cfd06..fa4242cdade 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,6 +55,7 @@ struct compat_timeval;
struct robust_list_head;
struct getcpu_cache;
struct old_linux_dirent;
+struct perf_counter_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -65,6 +66,7 @@ struct old_linux_dirent;
#include <asm/signal.h>
#include <linux/quota.h>
#include <linux/key.h>
+#include <trace/syscall.h>
#define __SC_DECL1(t1, a1) t1 a1
#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
@@ -95,7 +97,46 @@ struct old_linux_dirent;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define __SC_STR_ADECL1(t, a) #a
+#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
+#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__)
+#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__)
+#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__)
+#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__)
+
+#define __SC_STR_TDECL1(t, a) #t
+#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__)
+#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__)
+#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__)
+#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
+#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
+
+#define SYSCALL_METADATA(sname, nb) \
+ static const struct syscall_metadata __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("__syscalls_metadata"))) \
+ __syscall_meta_##sname = { \
+ .name = "sys"#sname, \
+ .nb_args = nb, \
+ .types = types_##sname, \
+ .args = args_##sname, \
+ }
+
+#define SYSCALL_DEFINE0(sname) \
+ static const struct syscall_metadata __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("__syscalls_metadata"))) \
+ __syscall_meta_##sname = { \
+ .name = "sys_"#sname, \
+ .nb_args = 0, \
+ }; \
+ asmlinkage long sys_##sname(void)
+
+#else
#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
+#endif
+
#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
@@ -108,7 +149,7 @@ struct old_linux_dirent;
asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \
"\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
#else
-#ifdef CONFIG_ALPHA
+#if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS)
#define SYSCALL_ALIAS(alias, name) \
asm ( #alias " = " #name "\n\t.globl " #alias)
#else
@@ -117,10 +158,26 @@ struct old_linux_dirent;
#endif
#endif
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define SYSCALL_DEFINEx(x, sname, ...) \
+ static const char *types_##sname[] = { \
+ __SC_STR_TDECL##x(__VA_ARGS__) \
+ }; \
+ static const char *args_##sname[] = { \
+ __SC_STR_ADECL##x(__VA_ARGS__) \
+ }; \
+ SYSCALL_METADATA(sname, x); \
+ __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+#else
+#define SYSCALL_DEFINEx(x, sname, ...) \
+ __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+#endif
+
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
#define SYSCALL_DEFINE(name) static inline long SYSC_##name
-#define SYSCALL_DEFINEx(x, name, ...) \
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
@@ -134,7 +191,7 @@ struct old_linux_dirent;
#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
-#define SYSCALL_DEFINEx(x, name, ...) \
+#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
@@ -377,6 +434,8 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
asmlinkage long sys_fcntl64(unsigned int fd,
unsigned int cmd, unsigned long arg);
#endif
+asmlinkage long sys_pipe(int __user *fildes);
+asmlinkage long sys_pipe2(int __user *fildes, int flags);
asmlinkage long sys_dup(unsigned int fildes);
asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
@@ -461,6 +520,10 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
size_t count, loff_t pos);
asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
size_t count, loff_t pos);
+asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, int mode);
asmlinkage long sys_chdir(const char __user *filename);
@@ -689,9 +752,11 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
struct timespec __user *, const sigset_t __user *,
size_t);
-asmlinkage long sys_pipe2(int __user *, int);
-asmlinkage long sys_pipe(int __user *);
int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
+
+asmlinkage long sys_perf_counter_open(
+ struct perf_counter_attr __user *attr_uptr,
+ pid_t pid, int cpu, int group_fd, unsigned long flags);
#endif
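Under CONFIG_FTRACE_SYSCALLS each SYSCALL_DEFINEn site now emits two string arrays (argument types and names) plus a syscall_metadata record in the "__syscalls_metadata" section before expanding into the usual wrapper. Roughly, a definition like the hypothetical one below records its signature for the syscall tracer:

	/* Hypothetical syscall, shown only to illustrate what the macros generate. */
	SYSCALL_DEFINE2(example, unsigned int, fd, unsigned long, arg)
	{
		return 0;
	}
	/*
	 * With CONFIG_FTRACE_SYSCALLS this roughly produces:
	 *   static const char *types__example[] = { "unsigned int", "unsigned long" };
	 *   static const char *args__example[]  = { "fd", "arg" };
	 *   a struct syscall_metadata __syscall_meta__example with
	 *   .name = "sys_example" and .nb_args = 2 in "__syscalls_metadata",
	 * followed by the ordinary sys_example()/SyS_example() wrappers.
	 */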
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 39d471d1163..e76d3b22a46 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -490,6 +490,7 @@ enum
NET_IPV4_CONF_ARP_IGNORE=19,
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
+ NET_IPV4_CONF_ARP_NOTIFY=22,
__NET_IPV4_CONF_MAX
};
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fe77e1499ab..8afac76cd74 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -69,16 +69,16 @@ union tcp_word_hdr {
#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
enum {
- TCP_FLAG_CWR = __constant_htonl(0x00800000),
- TCP_FLAG_ECE = __constant_htonl(0x00400000),
- TCP_FLAG_URG = __constant_htonl(0x00200000),
- TCP_FLAG_ACK = __constant_htonl(0x00100000),
- TCP_FLAG_PSH = __constant_htonl(0x00080000),
- TCP_FLAG_RST = __constant_htonl(0x00040000),
- TCP_FLAG_SYN = __constant_htonl(0x00020000),
- TCP_FLAG_FIN = __constant_htonl(0x00010000),
- TCP_RESERVED_BITS = __constant_htonl(0x0F000000),
- TCP_DATA_OFFSET = __constant_htonl(0xF0000000)
+ TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
+ TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
+ TCP_FLAG_URG = __cpu_to_be32(0x00200000),
+ TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
+ TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
+ TCP_FLAG_RST = __cpu_to_be32(0x00040000),
+ TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
+ TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
+ TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
+ TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
};
/* TCP socket options */
@@ -218,7 +218,6 @@ struct tcp_options_received {
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
/* SACKs data */
- u8 eff_sacks; /* Size of SACK array to send with next packet */
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
@@ -249,7 +248,7 @@ struct tcp_sock {
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
u16 tcp_header_len; /* Bytes of tcp header to send */
- u16 xmit_size_goal; /* Goal for segmenting output packets */
+ u16 xmit_size_goal_segs; /* Goal for segmenting output packets */
/*
* Header prediction flags
@@ -378,7 +377,7 @@ struct tcp_sock {
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
- unsigned long last_synq_overflow;
+ int linger2;
/* Receiver side RTT estimation */
struct {
@@ -407,8 +406,6 @@ struct tcp_sock {
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info *md5sig_info;
#endif
-
- int linger2;
};
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 917707e6151..1de8b9eb841 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -27,27 +27,46 @@
#include <linux/idr.h>
#include <linux/device.h>
+#include <linux/workqueue.h>
struct thermal_zone_device;
struct thermal_cooling_device;
+enum thermal_device_mode {
+ THERMAL_DEVICE_DISABLED = 0,
+ THERMAL_DEVICE_ENABLED,
+};
+
+enum thermal_trip_type {
+ THERMAL_TRIP_ACTIVE = 0,
+ THERMAL_TRIP_PASSIVE,
+ THERMAL_TRIP_HOT,
+ THERMAL_TRIP_CRITICAL,
+};
+
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
int (*unbind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
- int (*get_temp) (struct thermal_zone_device *, char *);
- int (*get_mode) (struct thermal_zone_device *, char *);
- int (*set_mode) (struct thermal_zone_device *, const char *);
- int (*get_trip_type) (struct thermal_zone_device *, int, char *);
- int (*get_trip_temp) (struct thermal_zone_device *, int, char *);
+ int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_mode) (struct thermal_zone_device *,
+ enum thermal_device_mode *);
+ int (*set_mode) (struct thermal_zone_device *,
+ enum thermal_device_mode);
+ int (*get_trip_type) (struct thermal_zone_device *, int,
+ enum thermal_trip_type *);
+ int (*get_trip_temp) (struct thermal_zone_device *, int,
+ unsigned long *);
int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*notify) (struct thermal_zone_device *, int,
+ enum thermal_trip_type);
};
struct thermal_cooling_device_ops {
- int (*get_max_state) (struct thermal_cooling_device *, char *);
- int (*get_cur_state) (struct thermal_cooling_device *, char *);
- int (*set_cur_state) (struct thermal_cooling_device *, unsigned int);
+ int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
+ int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
+ int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
};
#define THERMAL_TRIPS_NONE -1
@@ -88,11 +107,19 @@ struct thermal_zone_device {
struct device device;
void *devdata;
int trips;
+ int tc1;
+ int tc2;
+ int passive_delay;
+ int polling_delay;
+ int last_temperature;
+ bool passive;
+ unsigned int forced_passive;
struct thermal_zone_device_ops *ops;
struct list_head cooling_devices;
struct idr idr;
struct mutex lock; /* protect cooling devices list */
struct list_head node;
+ struct delayed_work poll_queue;
#if defined(CONFIG_THERMAL_HWMON)
struct list_head hwmon_node;
struct thermal_hwmon_device *hwmon;
@@ -104,13 +131,16 @@ struct thermal_zone_device {
struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
struct
thermal_zone_device_ops
- *);
+ *, int tc1, int tc2,
+ int passive_freq,
+ int polling_freq);
void thermal_zone_device_unregister(struct thermal_zone_device *);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
+void thermal_zone_device_update(struct thermal_zone_device *);
struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
struct
thermal_cooling_device_ops
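Thermal driver callbacks now report typed values (millidegrees Celsius in an unsigned long, plus enums for mode and trip type) instead of formatting sysfs strings themselves. A hedged sketch of a get_temp implementation under the new prototype (the constant reading is a placeholder for a real sensor access):

	/* Illustrative only: always report 42.000 degrees C in millidegrees. */
	static int example_get_temp(struct thermal_zone_device *tz,
				    unsigned long *temp)
	{
		/* a real driver would read its sensor via tz->devdata here */
		*temp = 42000;
		return 0;
	}

	static struct thermal_zone_device_ops example_ops = {
		.get_temp = example_get_temp,
	};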
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e6b820f8b56..a8cc4e13434 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -21,13 +21,14 @@ struct restart_block {
struct {
unsigned long arg0, arg1, arg2, arg3;
};
- /* For futex_wait */
+ /* For futex_wait and futex_wait_requeue_pi */
struct {
u32 *uaddr;
u32 val;
u32 flags;
u32 bitset;
u64 time;
+ u32 *uaddr2;
} futex;
/* For nanosleep */
struct {
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 469b82d88b3..0482229c07d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -97,10 +97,12 @@ extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
+extern int tick_oneshot_mode_active(void);
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
+static inline int tick_oneshot_mode_active(void) { return 0; }
# endif
#else /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -109,6 +111,7 @@ static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
+static inline int tick_oneshot_mode_active(void) { return 0; }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
# ifdef CONFIG_NO_HZ
diff --git a/include/linux/time.h b/include/linux/time.h
index fbbd2a1c92b..ea16c1a01d5 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -12,14 +12,14 @@
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
struct timespec {
- time_t tv_sec; /* seconds */
- long tv_nsec; /* nanoseconds */
+ __kernel_time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
};
#endif
struct timeval {
- time_t tv_sec; /* seconds */
- suseconds_t tv_usec; /* microseconds */
+ __kernel_time_t tv_sec; /* seconds */
+ __kernel_suseconds_t tv_usec; /* microseconds */
};
struct timezone {
@@ -113,6 +113,21 @@ struct timespec current_kernel_time(void);
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
+/* Some architectures do not supply their own clocksource.
+ * This is mainly the case in architectures that get their
+ * inter-tick times by reading the counter on their interval
+ * timer. Since these timers wrap every tick, they're not really
+ * useful as clocksources. Wrapping them to act like one is possible
+ * but not very efficient. So we provide a callout these arches
+ * can implement for use with the jiffies clocksource to provide
+ * finer then tick granular time.
+ */
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+extern u32 arch_gettimeoffset(void);
+#else
+static inline u32 arch_gettimeoffset(void) { return 0; }
+#endif
+
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
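With CONFIG_ARCH_USES_GETTIMEOFFSET set, the architecture reports the sub-tick offset itself and the generic timekeeping code adds it on top of the jiffies-based time. A hedged sketch of such a hook, assuming the offset is returned in nanoseconds (the hardware access is only a placeholder):

	/* Hypothetical arch hook: nanoseconds elapsed since the last timer tick. */
	u32 arch_gettimeoffset(void)
	{
		u32 hw_ticks = 0;	/* a real port reads its interval timer here */

		return hw_ticks * 1000;	/* example scale: 1 hardware tick == 1 us */
	}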
diff --git a/include/linux/timecompare.h b/include/linux/timecompare.h
new file mode 100644
index 00000000000..546e2234e4b
--- /dev/null
+++ b/include/linux/timecompare.h
@@ -0,0 +1,125 @@
+/*
+ * Utility code which helps transforming between two different time
+ * bases, called "source" and "target" time in this code.
+ *
+ * Source time has to be provided via the timecounter API while target
+ * time is accessed via a function callback whose prototype
+ * intentionally matches ktime_get() and ktime_get_real(). These
+ * interfaces were chosen like this so that the code serves its
+ * initial purpose without additional glue code.
+ *
+ * This purpose is synchronizing a hardware clock in a NIC with system
+ * time, in order to implement the Precision Time Protocol (PTP,
+ * IEEE1588) with more accurate hardware assisted time stamping. In
+ * that context only synchronization against system time (=
+ * ktime_get_real()) is currently needed. But this utility code might
+ * become useful in other situations, which is why it was written as
+ * general purpose utility code.
+ *
+ * The source timecounter is assumed to return monotonically
+ * increasing time (but this code does its best to compensate if that
+ * is not the case) whereas target time may jump.
+ *
+ * The target time corresponding to a source time is determined by
+ * reading target time, reading source time, reading target time
+ * again, then assuming that average target time corresponds to source
+ * time. In other words, the assumption is that reading the source
+ * time is slow and involves equal time for sending the request and
+ * receiving the reply, whereas reading target time is assumed to be
+ * fast.
+ *
+ * Copyright (C) 2009 Intel Corporation.
+ * Author: Patrick Ohly <patrick.ohly@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef _LINUX_TIMECOMPARE_H
+#define _LINUX_TIMECOMPARE_H
+
+#include <linux/clocksource.h>
+#include <linux/ktime.h>
+
+/**
+ * struct timecompare - stores state and configuration for the two clocks
+ *
+ * Initialize to zero, then set source/target/num_samples.
+ *
+ * Transformation between source time and target time is done with:
+ * target_time = source_time + offset +
+ * (source_time - last_update) * skew /
+ * TIMECOMPARE_SKEW_RESOLUTION
+ *
+ * @source: used to get source time stamps via timecounter_read()
+ * @target: function returning target time (for example, ktime_get
+ * for monotonic time, or ktime_get_real for wall clock)
+ * @num_samples: number of times that source time and target time are to
+ * be compared when determining their offset
+ * @offset: (target time - source time) at the time of the last update
+ * @skew: average (target time - source time) / delta source time *
+ * TIMECOMPARE_SKEW_RESOLUTION
+ * @last_update: last source time stamp when time offset was measured
+ */
+struct timecompare {
+ struct timecounter *source;
+ ktime_t (*target)(void);
+ int num_samples;
+
+ s64 offset;
+ s64 skew;
+ u64 last_update;
+};
+
+/**
+ * timecompare_transform - transform source time stamp into target time base
+ * @sync: context for time sync
+ * @source_tstamp: the result of timecounter_read() or
+ * timecounter_cyc2time()
+ */
+extern ktime_t timecompare_transform(struct timecompare *sync,
+ u64 source_tstamp);
+
+/**
+ * timecompare_offset - measure current (target time - source time) offset
+ * @sync: context for time sync
+ * @offset: average offset during sample period returned here
+ * @source_tstamp: average source time during sample period returned here
+ *
+ * Returns number of samples used. Might be zero (= no result) in the
+ * unlikely case that target time was monotonically decreasing for all
+ * samples (= broken).
+ */
+extern int timecompare_offset(struct timecompare *sync,
+ s64 *offset,
+ u64 *source_tstamp);
+
+extern void __timecompare_update(struct timecompare *sync,
+ u64 source_tstamp);
+
+/**
+ * timecompare_update - update offset and skew by measuring current offset
+ * @sync: context for time sync
+ * @source_tstamp: the result of timecounter_read() or
+ * timecounter_cyc2time(), pass zero to force update
+ *
+ * Updates are only done at most once per second.
+ */
+static inline void timecompare_update(struct timecompare *sync,
+ u64 source_tstamp)
+{
+ if (!source_tstamp ||
+ (s64)(source_tstamp - sync->last_update) >= NSEC_PER_SEC)
+ __timecompare_update(sync, source_tstamp);
+}
+
+#endif /* _LINUX_TIMECOMPARE_H */
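Given the documented transformation target_time = source_time + offset + (source_time - last_update) * skew / TIMECOMPARE_SKEW_RESOLUTION, the expected driver pattern is to run every hardware timestamp through timecompare_transform() and let timecompare_update() refresh offset and skew at most once per second. A hedged sketch for a hypothetical NIC driver (the timecounter wiring is omitted):

	/* Illustrative pairing of a NIC hardware clock with system wall time. */
	static struct timecompare example_compare = {
		/* .source must point at the driver's struct timecounter */
		.target		= ktime_get_real,	/* compare against wall-clock time */
		.num_samples	= 10,
	};

	static ktime_t example_hw_to_sys(u64 hw_ns)
	{
		timecompare_update(&example_compare, hw_ns);
		return timecompare_transform(&example_compare, hw_ns);
	}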
diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685b861..ccf882eed8f 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
#include <linux/ktime.h>
#include <linux/stddef.h>
#include <linux/debugobjects.h>
+#include <linux/stringify.h>
struct tvec_base;
@@ -21,52 +22,126 @@ struct timer_list {
char start_comm[16];
int start_pid;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
};
extern struct tvec_base boot_tvec_bases;
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting the lockdep_map key
+ * (second argument) here is required, otherwise it could be initialised to
+ * the copy of the lockdep_map later! We use the pointer to and the string
+ * "<file>:<line>" as the key resp. the name of the lockdep_map.
+ */
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
+ .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
+#else
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
+#endif
+
#define TIMER_INITIALIZER(_function, _expires, _data) { \
.entry = { .prev = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.expires = (_expires), \
.data = (_data), \
.base = &boot_tvec_bases, \
+ __TIMER_LOCKDEP_MAP_INITIALIZER( \
+ __FILE__ ":" __stringify(__LINE__)) \
}
#define DEFINE_TIMER(_name, _function, _expires, _data) \
struct timer_list _name = \
TIMER_INITIALIZER(_function, _expires, _data)
-void init_timer(struct timer_list *timer);
-void init_timer_deferrable(struct timer_list *timer);
+void init_timer_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
+void init_timer_deferrable_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define init_timer(timer) \
+ do { \
+ static struct lock_class_key __key; \
+ init_timer_key((timer), #timer, &__key); \
+ } while (0)
+
+#define init_timer_deferrable(timer) \
+ do { \
+ static struct lock_class_key __key; \
+ init_timer_deferrable_key((timer), #timer, &__key); \
+ } while (0)
+
+#define init_timer_on_stack(timer) \
+ do { \
+ static struct lock_class_key __key; \
+ init_timer_on_stack_key((timer), #timer, &__key); \
+ } while (0)
+
+#define setup_timer(timer, fn, data) \
+ do { \
+ static struct lock_class_key __key; \
+ setup_timer_key((timer), #timer, &__key, (fn), (data));\
+ } while (0)
+
+#define setup_timer_on_stack(timer, fn, data) \
+ do { \
+ static struct lock_class_key __key; \
+ setup_timer_on_stack_key((timer), #timer, &__key, \
+ (fn), (data)); \
+ } while (0)
+#else
+#define init_timer(timer)\
+ init_timer_key((timer), NULL, NULL)
+#define init_timer_deferrable(timer)\
+ init_timer_deferrable_key((timer), NULL, NULL)
+#define init_timer_on_stack(timer)\
+ init_timer_on_stack_key((timer), NULL, NULL)
+#define setup_timer(timer, fn, data)\
+ setup_timer_key((timer), NULL, NULL, (fn), (data))
+#define setup_timer_on_stack(timer, fn, data)\
+ setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
+#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack(struct timer_list *timer);
+extern void init_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
extern void destroy_timer_on_stack(struct timer_list *timer);
#else
static inline void destroy_timer_on_stack(struct timer_list *timer) { }
-static inline void init_timer_on_stack(struct timer_list *timer)
+static inline void init_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key)
{
- init_timer(timer);
+ init_timer_key(timer, name, key);
}
#endif
-static inline void setup_timer(struct timer_list * timer,
+static inline void setup_timer_key(struct timer_list * timer,
+ const char *name,
+ struct lock_class_key *key,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
- init_timer(timer);
+ init_timer_key(timer, name, key);
}
-static inline void setup_timer_on_stack(struct timer_list *timer,
+static inline void setup_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
- init_timer_on_stack(timer);
+ init_timer_on_stack_key(timer, name, key);
}
/**
@@ -86,9 +161,12 @@ static inline int timer_pending(const struct timer_list * timer)
extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
-extern int __mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
+#define TIMER_NOT_PINNED 0
+#define TIMER_PINNED 1
/*
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
@@ -146,25 +224,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
}
#endif
-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-static inline void add_timer(struct timer_list *timer)
-{
- BUG_ON(timer_pending(timer));
- __mod_timer(timer, timer->expires);
-}
+extern void add_timer(struct timer_list *timer);
#ifdef CONFIG_SMP
extern int try_to_del_timer_sync(struct timer_list *timer);
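Existing callers keep the familiar init_timer()/setup_timer() forms; under CONFIG_LOCKDEP the wrapper macros capture a static per-call-site lock class key and a human-readable name (the stringified timer argument, or "<file>:<line>" for static initializers). A small usage sketch (the handler and one-second expiry are illustrative):

	/* Illustrative only: each setup_timer() call site gets its own lockdep class. */
	static void example_timer_fn(unsigned long data)
	{
		/* timer callback; 'data' is whatever setup_timer() was given */
	}

	static struct timer_list example_timer;

	static void example_start(void)
	{
		setup_timer(&example_timer, example_timer_fn, 0);
		mod_timer(&example_timer, jiffies + HZ);	/* fire in about one second */
	}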
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
new file mode 100644
index 00000000000..3e08a1c8683
--- /dev/null
+++ b/include/linux/timeriomem-rng.h
@@ -0,0 +1,21 @@
+/*
+ * linux/include/linux/timeriomem-rng.h
+ *
+ * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/completion.h>
+
+struct timeriomem_rng_data {
+ struct completion completion;
+ unsigned int present:1;
+
+ void __iomem *address;
+
+ /* measured in usecs */
+ unsigned int period;
+};
diff --git a/include/linux/times.h b/include/linux/times.h
index e2d3020742a..87b62615ced 100644
--- a/include/linux/times.h
+++ b/include/linux/times.h
@@ -4,10 +4,10 @@
#include <linux/types.h>
struct tms {
- clock_t tms_utime;
- clock_t tms_stime;
- clock_t tms_cutime;
- clock_t tms_cstime;
+ __kernel_clock_t tms_utime;
+ __kernel_clock_t tms_stime;
+ __kernel_clock_t tms_cutime;
+ __kernel_clock_t tms_cstime;
};
#endif
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 998a55d80ac..e6967d10d9e 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -170,17 +170,37 @@ struct timex {
#include <asm/timex.h>
/*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic. SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
+ * SHIFT_PLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in PLL mode.
+ * It is also used in dampening the offset correction, to define how
+ * much of the current value in time_offset we correct for each
+ * second. Changing this value changes the stiffness of the ntp
+ * adjustment code. A lower value makes it more flexible, reducing
+ * NTP convergence time. A higher value makes it stiffer, increasing
+ * convergence time, but making the clock more stable.
*
- * MAXTC establishes the maximum time constant of the PLL. With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
+ * In David Mills' nanokernel reference implementation SHIFT_PLL is 4.
+ * However, this seems to make convergence take much too long.
+ *
+ * https://lists.ntp.org/pipermail/hackers/2008-January/003487.html
+ *
+ * In the above mailing list discussion, it seems the value of 4
+ * was appropriate for other Unix systems with HZ=100, and that
+ * SHIFT_PLL should be decreased as HZ increases. However, Linux's
+ * clock steering implementation is HZ independent.
+ *
+ * Through experimentation, a SHIFT_PLL value of 2 was found to allow
+ * for fast convergence (very similar to the NTPv3 code used prior to
+ * v2.6.19), with good clock stability.
+ *
+ *
+ * SHIFT_FLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in FLL mode.
+ * In David Mills' nanokernel reference implementation SHIFT_FLL is 2.
+ *
+ * MAXTC establishes the maximum time constant of the PLL.
*/
-#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
+#define SHIFT_PLL 2 /* PLL frequency factor (shift) */
#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
#define MAXTC 10 /* maximum time constant (shift) */
@@ -190,12 +210,12 @@ struct timex {
* offset and maximum frequency tolerance.
*/
#define SHIFT_USEC 16 /* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
#define PPM_SCALE_INV_SHIFT 19
-#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+#define PPM_SCALE_INV ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
PPM_SCALE + 1)
-#define MAXPHASE 500000000l /* max phase error (ns) */
+#define MAXPHASE 500000000L /* max phase error (ns) */
#define MAXFREQ 500000 /* max frequency error (ns/s) */
#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
#define MINSEC 256 /* min interval between updates (s) */
@@ -260,6 +280,9 @@ extern int do_adjtimex(struct timex *);
int read_current_timer(unsigned long *timer_val);
+/* The clock frequency of the i8253/i8254 PIT */
+#define PIT_TICK_RATE 1193182ul
+
#endif /* KERNEL */
#endif /* LINUX_TIMEX_H */
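PIT_TICK_RATE moves into timex.h so that arch code can share one definition; its classic use is computing the divisor latched into the i8253/i8254 for a given HZ. A small illustrative sketch (the rounding idiom mirrors the kernel's LATCH macro; the name is made up here):

#include <linux/timex.h>
#include <linux/param.h>	/* HZ */

/* Divisor programmed into the PIT to get one interrupt per tick. */
#define EXAMPLE_PIT_LATCH	((PIT_TICK_RATE + HZ / 2) / HZ)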
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e632d29f054..7402c1a27c4 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
#endif
#ifndef nr_cpus_node
-#define nr_cpus_node(node) \
- ({ \
- node_to_cpumask_ptr(__tmp__, node); \
- cpus_weight(*__tmp__); \
- })
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif
#define for_each_node_with_cpus(node) \
@@ -193,5 +189,16 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_siblings
#define topology_core_siblings(cpu) cpumask_of_cpu(cpu)
#endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu) cpumask_of(cpu)
+#endif
+
+/* Returns the number of the current Node. */
+#ifndef numa_node_id
+#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
+#endif
#endif /* _LINUX_TOPOLOGY_H */
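A small sketch exercising the cpumask-based topology helpers above (nr_cpus_node(), numa_node_id()); purely illustrative, not part of this patch.

#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/kernel.h>

static void dump_node_topology(void)
{
	int node;

	pr_info("running on node %d\n", numa_node_id());

	for_each_online_node(node)
		pr_info("node %d: %u cpus\n", node, nr_cpus_node(node));
}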
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
new file mode 100644
index 00000000000..3338b3f5c21
--- /dev/null
+++ b/include/linux/tpm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Debora Velarde <dvelarde@us.ibm.com>
+ *
+ * Maintained by: <tpmdd_devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#ifndef __LINUX_TPM_H__
+#define __LINUX_TPM_H__
+
+/*
+ * Chip num is this value or a valid tpm idx
+ */
+#define TPM_ANY_NUM 0xFFFF
+
+#if defined(CONFIG_TCG_TPM)
+
+extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
+#endif
+#endif
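A minimal sketch of calling the newly exported TPM interface from another kernel subsystem; the PCR index and error handling are illustrative assumptions.

#include <linux/types.h>
#include <linux/tpm.h>

#define EXAMPLE_PCR_IDX	10	/* assumed PCR index */

static int example_extend_pcr(const u8 *sha1_digest)
{
	u8 pcr_value[20];	/* SHA-1 sized PCR value */
	int rc;

	/* TPM_ANY_NUM asks the TPM core to pick any registered chip. */
	rc = tpm_pcr_read(TPM_ANY_NUM, EXAMPLE_PCR_IDX, pcr_value);
	if (rc)
		return rc;

	return tpm_pcr_extend(TPM_ANY_NUM, EXAMPLE_PCR_IDX, sha1_digest);
}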
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 00000000000..7a813038408
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_TRACE_CLOCK_H
+#define _LINUX_TRACE_CLOCK_H
+
+/*
+ * 3 trace clock variants, with differing scalability/precision
+ * tradeoffs:
+ *
+ * - local: CPU-local trace clock
+ * - medium: scalable global clock with some jitter
+ * - global: globally monotonic, serialized clock
+ */
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_local(void);
+extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_global(void);
+
+#endif /* _LINUX_TRACE_CLOCK_H */
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
new file mode 100644
index 00000000000..c134dd1fe6b
--- /dev/null
+++ b/include/linux/trace_seq.h
@@ -0,0 +1,94 @@
+#ifndef _LINUX_TRACE_SEQ_H
+#define _LINUX_TRACE_SEQ_H
+
+#include <linux/fs.h>
+
+#include <asm/page.h>
+
+/*
+ * Trace sequences are used to allow a function to call several other functions
+ * to create a string of data to use (up to a max of PAGE_SIZE).
+ */
+
+struct trace_seq {
+ unsigned char buffer[PAGE_SIZE];
+ unsigned int len;
+ unsigned int readpos;
+};
+
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+ s->len = 0;
+ s->readpos = 0;
+}
+
+/*
+ * Currently only defined when tracing is enabled.
+ */
+#ifdef CONFIG_TRACING
+extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 2, 0)));
+extern int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ size_t cnt);
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ size_t len);
+extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+extern int trace_seq_path(struct trace_seq *s, struct path *path);
+
+#else /* CONFIG_TRACING */
+static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+ return 0;
+}
+static inline int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+ return 0;
+}
+
+static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+}
+static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ size_t cnt)
+{
+ return 0;
+}
+static inline int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+ return 0;
+}
+static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+ return 0;
+}
+static inline int
+trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+{
+ return 0;
+}
+static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ size_t len)
+{
+ return 0;
+}
+static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+ return NULL;
+}
+static inline int trace_seq_path(struct trace_seq *s, struct path *path)
+{
+ return 0;
+}
+#endif /* CONFIG_TRACING */
+
+#endif /* _LINUX_TRACE_SEQ_H */
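A sketch of typical trace_seq usage when CONFIG_TRACING is enabled; the buffer is PAGE_SIZE, so real callers embed the struct somewhere longer-lived — the static instance here is illustration only and is not reentrant.

#include <linux/trace_seq.h>
#include <linux/seq_file.h>

/* Build up a line of output and copy it into a seq_file. */
static void example_show(struct seq_file *m, unsigned long hits,
			 unsigned long misses)
{
	static struct trace_seq s;	/* static to avoid a PAGE_SIZE stack frame */

	trace_seq_init(&s);
	trace_seq_printf(&s, "hits=%lu ", hits);
	trace_seq_printf(&s, "misses=%lu", misses);
	trace_seq_putc(&s, '\n');
	trace_print_seq(m, &s);
}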
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 6186a789d6c..17ba82efa48 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -143,7 +143,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
*
* Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
*
- * Called with task_lock() held on @task.
+ * @task->cred_guard_mutex is held by the caller through the do_execve().
*/
static inline int tracehook_unsafe_exec(struct task_struct *task)
{
@@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child,
/**
* tracehook_report_clone - in parent, new child is about to start running
- * @trace: return value from tracehook_prepare_clone()
* @regs: parent's user register state
* @clone_flags: flags from parent's system call
* @pid: new child's PID in the parent's namespace
* @child: new child task
*
- * Called after a child is set up, but before it has been started
- * running. @trace is the value returned by tracehook_prepare_clone().
+ * Called after a child is set up, but before it has been started running.
* This is not a good place to block, because the child has not started
* yet. Suspend the child here if desired, and then block in
* tracehook_report_clone_complete(). This must prevent the child from
@@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child,
*
* Called with no locks held, but the child cannot run until this returns.
*/
-static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
+static inline void tracehook_report_clone(struct pt_regs *regs,
unsigned long clone_flags,
pid_t pid, struct task_struct *child)
{
- if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) {
+ if (unlikely(task_ptrace(child))) {
/*
- * The child starts up with an immediate SIGSTOP.
+ * It doesn't matter who attached or is attaching to this
+ * task; the pending SIGSTOP is correct in either case.
*/
sigaddset(&child->pending.signal, SIGSTOP);
set_tsk_thread_flag(child, TIF_SIGPENDING);
@@ -388,17 +387,14 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
* tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
* @task: task receiving the signal
* @sig: signal number being sent
- * @handler: %SIG_IGN or %SIG_DFL
*
* Return zero iff tracing doesn't care to examine this ignored signal,
* so it can short-circuit normal delivery and never even get queued.
- * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
*
* Called with @task->sighand->siglock held.
*/
static inline int tracehook_consider_ignored_signal(struct task_struct *task,
- int sig,
- void __user *handler)
+ int sig)
{
return (task_ptrace(task) & PT_PTRACED) != 0;
}
@@ -407,19 +403,17 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task,
* tracehook_consider_fatal_signal - suppress special handling of fatal signal
* @task: task receiving the signal
* @sig: signal number being sent
- * @handler: %SIG_DFL or %SIG_IGN
*
* Return nonzero to prevent special handling of this termination signal.
- * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored,
- * in which case force_sig() is about to reset it to %SIG_DFL.
+ * Normally the handler for the signal is %SIG_DFL. It can be %SIG_IGN if
+ * @sig is ignored, in which case force_sig() is about to reset it to %SIG_DFL.
* When this returns zero, this signal might cause a quick termination
* that does not give the debugger a chance to intercept the signal.
*
* Called with or without @task->sighand->siglock held.
*/
static inline int tracehook_consider_fatal_signal(struct task_struct *task,
- int sig,
- void __user *handler)
+ int sig)
{
return (task_ptrace(task) & PT_PTRACED) != 0;
}
@@ -507,7 +501,7 @@ static inline int tracehook_notify_jctl(int notify, int why)
static inline int tracehook_notify_death(struct task_struct *task,
void **death_cookie, int group_dead)
{
- if (task->exit_signal == -1)
+ if (task_detached(task))
return task->ptrace ? SIGCHLD : DEATH_REAP;
/*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 75700545836..b9dc4ca0246 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,8 +31,10 @@ struct tracepoint {
* Keep in sync with vmlinux.lds.h.
*/
-#define TPPROTO(args...) args
-#define TPARGS(args...) args
+#ifndef DECLARE_TRACE
+
+#define TP_PROTO(args...) args
+#define TP_ARGS(args...) args
#ifdef CONFIG_TRACEPOINTS
@@ -65,7 +67,7 @@ struct tracepoint {
{ \
if (unlikely(__tracepoint_##name.state)) \
__DO_TRACE(&__tracepoint_##name, \
- TPPROTO(proto), TPARGS(args)); \
+ TP_PROTO(proto), TP_ARGS(args)); \
} \
static inline int register_trace_##name(void (*probe)(proto)) \
{ \
@@ -114,6 +116,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end)
{ }
#endif /* CONFIG_TRACEPOINTS */
+#endif /* DECLARE_TRACE */
/*
* Connect a probe to a tracepoint.
@@ -153,4 +156,113 @@ static inline void tracepoint_synchronize_unregister(void)
synchronize_sched();
}
+#define PARAMS(args...) args
+
+#ifndef TRACE_EVENT
+/*
+ * For use with the TRACE_EVENT macro:
+ *
+ * We define a tracepoint, its arguments, its printk format
+ * and its 'fast binary record' layout.
+ *
+ * First, name your tracepoint via TRACE_EVENT(name, ...): the
+ * 'subsystem_event' notation is fine.
+ *
+ * Think about this whole construct as the
+ * 'trace_sched_switch() function' from now on.
+ *
+ *
+ * TRACE_EVENT(sched_switch,
+ *
+ * *
+ * * A function has a regular function-arguments
+ * * prototype; declare it via TP_PROTO():
+ * *
+ *
+ * TP_PROTO(struct rq *rq, struct task_struct *prev,
+ * struct task_struct *next),
+ *
+ * *
+ * * Define the call signature of the 'function'.
+ * * (Design sidenote: we use this instead of a
+ * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
+ * *
+ *
+ * TP_ARGS(rq, prev, next),
+ *
+ * *
+ * * Fast binary tracing: define the trace record via
+ * * TP_STRUCT__entry(). You can think about it like a
+ * * regular C structure local variable definition.
+ * *
+ * * This is how the trace record is structured and will
+ * * be saved into the ring buffer. These are the fields
+ * * that will be exposed to user-space in
+ * * /sys/kernel/debug/tracing/events/<*>/format.
+ * *
+ * * The declared 'local variable' is called '__entry'
+ * *
+ * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
+ * *
+ * * pid_t prev_pid;
+ * *
+ * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
+ * *
+ * * char prev_comm[TASK_COMM_LEN];
+ * *
+ *
+ * TP_STRUCT__entry(
+ * __array( char, prev_comm, TASK_COMM_LEN )
+ * __field( pid_t, prev_pid )
+ * __field( int, prev_prio )
+ * __array( char, next_comm, TASK_COMM_LEN )
+ * __field( pid_t, next_pid )
+ * __field( int, next_prio )
+ * ),
+ *
+ * *
+ * * Assign the entry into the trace record, by embedding
+ * * a full C statement block into TP_fast_assign(). You
+ * * can refer to the trace record as '__entry' -
+ * * otherwise you can put arbitrary C code in here.
+ * *
+ * * Note: this C code will execute every time a trace event
+ * * happens, on an active tracepoint.
+ * *
+ *
+ * TP_fast_assign(
+ * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ * __entry->prev_pid = prev->pid;
+ * __entry->prev_prio = prev->prio;
+ * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ * __entry->next_pid = next->pid;
+ * __entry->next_prio = next->prio;
+ * )
+ *
+ * *
+ * * Formatted output of a trace record via TP_printk().
+ * * This is how the tracepoint will appear under ftrace
+ * * plugins that make use of this tracepoint.
+ * *
+ * * (raw-binary tracing won't actually perform this step.)
+ * *
+ *
+ * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+ * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+ * __entry->next_comm, __entry->next_pid, __entry->next_prio),
+ *
+ * );
+ *
+ * This macro construct is thus used for the regular printk-format
+ * tracing setup; it is used to construct a function-pointer based
+ * tracepoint callback (this is used by programmatic plugins and
+ * can also be used by generic instrumentation like SystemTap), and
+ * it is also used to expose a structured trace record in
+ * /sys/kernel/debug/tracing/events/.
+ */
+
+#define TRACE_EVENT(name, proto, args, struct, assign, print) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#endif
+
#endif
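A sketch of attaching a probe to a tracepoint built with the DECLARE_TRACE()/TP_PROTO()/TP_ARGS() machinery above; the tracepoint name and probe body are hypothetical — normally DECLARE_TRACE() lives in a shared header and DEFINE_TRACE() in exactly one .c file.

#include <linux/module.h>
#include <linux/tracepoint.h>

/* Hypothetical tracepoint, declared and defined here only for illustration. */
DECLARE_TRACE(example_event,
	TP_PROTO(int value),
	TP_ARGS(value));
DEFINE_TRACE(example_event);

static void example_probe(int value)
{
	/* Runs in the context of whoever calls trace_example_event(value). */
}

static int __init example_init(void)
{
	return register_trace_example_event(example_probe);
}

static void __exit example_exit(void)
{
	unregister_trace_example_event(example_probe);
	tracepoint_synchronize_unregister();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");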
diff --git a/include/linux/tty.h b/include/linux/tty.h
index fc39db95499..1488d8c81aa 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -185,7 +185,7 @@ struct tty_port;
struct tty_port_operations {
/* Return 1 if the carrier is raised */
int (*carrier_raised)(struct tty_port *port);
- void (*raise_dtr_rts)(struct tty_port *port);
+ void (*dtr_rts)(struct tty_port *port, int raise);
};
struct tty_port {
@@ -201,6 +201,9 @@ struct tty_port {
unsigned char *xmit_buf; /* Optional buffer */
int close_delay; /* Close port delay */
int closing_wait; /* Delay for output */
+ int drain_delay; /* Set to zero if no pure time
+ based drain is needed else
+ set to size of fifo */
};
/*
@@ -223,8 +226,11 @@ struct tty_struct {
struct tty_driver *driver;
const struct tty_operations *ops;
int index;
- /* The ldisc objects are protected by tty_ldisc_lock at the moment */
- struct tty_ldisc ldisc;
+
+ /* Protects ldisc changes: Lock tty not pty */
+ struct mutex ldisc_mutex;
+ struct tty_ldisc *ldisc;
+
struct mutex termios_mutex;
spinlock_t ctrl_lock;
/* Termios values are protected by the termios mutex */
@@ -311,6 +317,7 @@ struct tty_struct {
#define TTY_CLOSING 7 /* ->close() in progress */
#define TTY_LDISC 9 /* Line discipline attached */
#define TTY_LDISC_CHANGING 10 /* Line discipline changing */
+#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */
#define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */
#define TTY_PTY_LOCK 16 /* pty private */
@@ -403,6 +410,7 @@ extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
+extern void tty_ldisc_hangup(struct tty_struct *tty);
extern const struct file_operations tty_ldiscs_proc_fops;
extern void tty_wakeup(struct tty_struct *tty);
@@ -425,6 +433,9 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
extern void tty_release_dev(struct file *filp);
extern int tty_init_termios(struct tty_struct *tty);
+extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
+extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
+
extern struct mutex tty_mutex;
extern void tty_write_unlock(struct tty_struct *tty);
@@ -438,6 +449,7 @@ extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
extern int tty_port_carrier_raised(struct tty_port *port);
extern void tty_port_raise_dtr_rts(struct tty_port *port);
+extern void tty_port_lower_dtr_rts(struct tty_port *port);
extern void tty_port_hangup(struct tty_port *port);
extern int tty_port_block_til_ready(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
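A sketch of the new tty_port_operations::dtr_rts() hook, which folds the old raise_dtr_rts() into a single raise/lower callback; the hardware-access helpers here are hypothetical.

#include <linux/tty.h>

/* Hypothetical hardware helpers for some serial device. */
static void my_hw_set_mctrl(struct tty_port *port, int dtr, int rts);
static int my_hw_carrier(struct tty_port *port);

static int my_carrier_raised(struct tty_port *port)
{
	return my_hw_carrier(port);	/* 1 if DCD is asserted */
}

/* Called with raise=1 on open/unthrottle, raise=0 on hangup/close. */
static void my_dtr_rts(struct tty_port *port, int raise)
{
	my_hw_set_mctrl(port, raise, raise);
}

static const struct tty_port_operations my_port_ops = {
	.carrier_raised	= my_carrier_raised,
	.dtr_rts	= my_dtr_rts,
};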
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 08e088334db..3566129384a 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -127,7 +127,8 @@
* the line discipline are close to full, and it should somehow
* signal that no more characters should be sent to the tty.
*
- * Optional: Always invoke via tty_throttle();
+ * Optional: Always invoke via tty_throttle(), called under the
+ * termios lock.
*
* void (*unthrottle)(struct tty_struct * tty);
*
@@ -135,7 +136,8 @@
* that characters can now be sent to the tty without fear of
* overrunning the input buffers of the line disciplines.
*
- * Optional: Always invoke via tty_unthrottle();
+ * Optional: Always invoke via tty_unthrottle(), called under the
+ * termios lock.
*
* void (*stop)(struct tty_struct *tty);
*
@@ -252,8 +254,6 @@ struct tty_operations {
void (*set_ldisc)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, int timeout);
void (*send_xchar)(struct tty_struct *tty, char ch);
- int (*read_proc)(char *page, char **start, off_t off,
- int count, int *eof, void *data);
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
@@ -264,6 +264,7 @@ struct tty_operations {
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
+ const struct file_operations *proc_fops;
};
struct tty_driver {
@@ -310,7 +311,8 @@ extern void tty_set_operations(struct tty_driver *driver,
extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
extern void tty_driver_kref_put(struct tty_driver *driver);
-extern inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
+
+static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
{
kref_get(&d->kref);
return d;
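A sketch of the proc_fops field that replaces the removed read_proc() hook: a tty driver now exposes its /proc/tty/driver entry through an ordinary seq_file-backed file_operations. Names are illustrative.

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/tty_driver.h>

static int my_tty_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "mytty: driver status goes here\n");
	return 0;
}

static int my_tty_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_tty_proc_show, NULL);
}

static const struct file_operations my_tty_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= my_tty_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct tty_operations my_tty_ops = {
	/* ...open/close/write/etc. omitted in this sketch... */
	.proc_fops	= &my_tty_proc_fops,
};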
diff --git a/include/linux/types.h b/include/linux/types.h
index 712ca53bc34..c42724f8c80 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -1,6 +1,9 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
+#include <asm/types.h>
+
+#ifndef __ASSEMBLY__
#ifdef __KERNEL__
#define DECLARE_BITMAP(name,bits) \
@@ -9,9 +12,8 @@
#endif
#include <linux/posix_types.h>
-#include <asm/types.h>
-#ifndef __KERNEL_STRICT_NAMES
+#ifdef __KERNEL__
typedef __u32 __kernel_dev_t;
@@ -29,7 +31,6 @@ typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;
-#ifdef __KERNEL__
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
@@ -45,14 +46,6 @@ typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
#endif /* CONFIG_UID16 */
-/* libc5 includes this file to define uid_t, thus uid_t can never change
- * when it is included by non-kernel code
- */
-#else
-typedef __kernel_uid_t uid_t;
-typedef __kernel_gid_t gid_t;
-#endif /* __KERNEL__ */
-
#if defined(__GNUC__)
typedef __kernel_loff_t loff_t;
#endif
@@ -138,7 +131,7 @@ typedef __s64 int64_t;
*
* blkcnt_t is the type of the inode's block count.
*/
-#ifdef CONFIG_LBD
+#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
#else
@@ -154,7 +147,7 @@ typedef unsigned long blkcnt_t;
#define pgoff_t unsigned long
#endif
-#endif /* __KERNEL_STRICT_NAMES */
+#endif /* __KERNEL__ */
/*
* Below are truly Linux-specific types that should never collide with
@@ -212,5 +205,5 @@ struct ustat {
};
#endif /* __KERNEL__ */
-
+#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
index 970473bf8d5..ed889f4168f 100644
--- a/include/linux/ucb1400.h
+++ b/include/linux/ucb1400.h
@@ -134,28 +134,13 @@ static inline void ucb1400_adc_enable(struct snd_ac97 *ac97)
ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA);
}
-static unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
- int adcsync)
-{
- unsigned int val;
-
- if (adcsync)
- adc_channel |= UCB_ADC_SYNC_ENA;
-
- ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
- ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel |
- UCB_ADC_START);
-
- while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA))
- & UCB_ADC_DAT_VALID))
- schedule_timeout_uninterruptible(1);
-
- return val & UCB_ADC_DAT_MASK;
-}
-
static inline void ucb1400_adc_disable(struct snd_ac97 *ac97)
{
ucb1400_reg_write(ac97, UCB_ADC_CR, 0);
}
+
+unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
+ int adcsync);
+
#endif
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index a0bb6bd2e5c..5dcc9ff72f6 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -22,6 +22,7 @@ struct uio_map;
/**
* struct uio_mem - description of a UIO memory region
+ * @name: name of the memory region for identification
* @addr: address of the device's memory
* @size: size of IO
* @memtype: type of memory addr points to
@@ -29,6 +30,7 @@ struct uio_map;
* @map: for use by the UIO core only.
*/
struct uio_mem {
+ const char *name;
unsigned long addr;
unsigned long size;
int memtype;
@@ -42,12 +44,14 @@ struct uio_portio;
/**
* struct uio_port - description of a UIO port region
+ * @name: name of the port region for identification
* @start: start of port region
* @size: size of port region
* @porttype: type of port (see UIO_PORT_* below)
* @portio: for use by the UIO core only.
*/
struct uio_port {
+ const char *name;
unsigned long start;
unsigned long size;
int porttype;
diff --git a/include/linux/ultrasound.h b/include/linux/ultrasound.h
index 6b7703e75ce..71339dc531c 100644
--- a/include/linux/ultrasound.h
+++ b/include/linux/ultrasound.h
@@ -34,7 +34,7 @@
* _GUS_VOICEOFF - Stops voice (no parameters)
* _GUS_VOICEFADE - Stops the voice smoothly.
* _GUS_VOICEMODE - Alters the voice mode, don't start or stop voice (P1=voice mode)
- * _GUS_VOICEBALA - Sets voice balence (P1, 0=left, 7=middle and 15=right, default 7)
+ * _GUS_VOICEBALA - Sets voice balance (P1, 0=left, 7=middle and 15=right, default 7)
* _GUS_VOICEFREQ - Sets voice (sample) playback frequency (P1=Hz)
* _GUS_VOICEVOL - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off)
* _GUS_VOICEVOL2 - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 88079fd6023..84929e91403 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -36,6 +36,7 @@ struct wusb_dev;
* - configs have one (often) or more interfaces;
* - interfaces have one (usually) or more settings;
* - each interface setting has zero or (usually) more endpoints.
+ * - a SuperSpeed endpoint has a companion descriptor
*
* And there might be other descriptors mixed in with those.
*
@@ -44,6 +45,19 @@ struct wusb_dev;
struct ep_device;
+/* For SS devices */
+/**
+ * struct usb_host_ss_ep_comp - Valid for SuperSpeed devices only
+ * @desc: endpoint companion descriptor, wMaxPacketSize in native byteorder
+ * @extra: descriptors following this endpoint companion descriptor
+ * @extralen: how many bytes of "extra" are valid
+ */
+struct usb_host_ss_ep_comp {
+ struct usb_ss_ep_comp_descriptor desc;
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+};
+
/**
* struct usb_host_endpoint - host-side endpoint descriptor and queue
* @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
@@ -51,6 +65,7 @@ struct ep_device;
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
* with one or more transfer descriptors (TDs) per urb
* @ep_dev: ep_device for sysfs info
+ * @ss_ep_comp: companion descriptor information for this endpoint
* @extra: descriptors following this endpoint in the configuration
* @extralen: how many bytes of "extra" are valid
* @enabled: URBs may be submitted to this endpoint
@@ -63,6 +78,7 @@ struct usb_host_endpoint {
struct list_head urb_list;
void *hcpriv;
struct ep_device *ep_dev; /* For sysfs info */
+ struct usb_host_ss_ep_comp *ss_ep_comp; /* For SS devices */
unsigned char *extra; /* Extra descriptors */
int extralen;
@@ -336,7 +352,6 @@ struct usb_bus {
#ifdef CONFIG_USB_DEVICEFS
struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */
#endif
- struct device *dev; /* device for this bus */
#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
struct mon_bus *mon_bus; /* non-null when associated */
@@ -363,6 +378,7 @@ struct usb_tt;
* struct usb_device - kernel's representation of a USB device
* @devnum: device number; address on a USB bus
* @devpath: device ID string for use in messages (e.g., /port/...)
+ * @route: tree topology hex string for use with xHCI
* @state: device state: configured, not attached, etc.
* @speed: device speed: high/full/low (or error)
* @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
@@ -420,6 +436,7 @@ struct usb_tt;
* @skip_sys_resume: skip the next system resume
* @wusb_dev: if this is a Wireless USB device, link to the WUSB
* specific data for the device.
+ * @slot_id: Slot ID assigned by xHCI
*
* Notes:
* Usbcore drivers should not set usbdev->state directly. Instead use
@@ -428,6 +445,7 @@ struct usb_tt;
struct usb_device {
int devnum;
char devpath [16];
+ u32 route;
enum usb_device_state state;
enum usb_device_speed speed;
@@ -503,6 +521,7 @@ struct usb_device {
unsigned skip_sys_resume:1;
#endif
struct wusb_dev *wusb_dev;
+ int slot_id;
};
#define to_usb_device(d) container_of(d, struct usb_device, dev)
@@ -643,186 +662,6 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
/*-------------------------------------------------------------------------*/
-/**
- * usb_endpoint_num - get the endpoint's number
- * @epd: endpoint to be checked
- *
- * Returns @epd's number: 0 to 15.
- */
-static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-}
-
-/**
- * usb_endpoint_type - get the endpoint's transfer type
- * @epd: endpoint to be checked
- *
- * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
- * to @epd's transfer type.
- */
-static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
-}
-
-/**
- * usb_endpoint_dir_in - check if the endpoint has IN direction
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type IN, otherwise it returns false.
- */
-static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
-}
-
-/**
- * usb_endpoint_dir_out - check if the endpoint has OUT direction
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type OUT, otherwise it returns false.
- */
-static inline int usb_endpoint_dir_out(
- const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
-}
-
-/**
- * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type bulk, otherwise it returns false.
- */
-static inline int usb_endpoint_xfer_bulk(
- const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_BULK);
-}
-
-/**
- * usb_endpoint_xfer_control - check if the endpoint has control transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type control, otherwise it returns false.
- */
-static inline int usb_endpoint_xfer_control(
- const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_CONTROL);
-}
-
-/**
- * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type interrupt, otherwise it returns
- * false.
- */
-static inline int usb_endpoint_xfer_int(
- const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_INT);
-}
-
-/**
- * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint is of type isochronous, otherwise it returns
- * false.
- */
-static inline int usb_endpoint_xfer_isoc(
- const struct usb_endpoint_descriptor *epd)
-{
- return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
- USB_ENDPOINT_XFER_ISOC);
-}
-
-/**
- * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has bulk transfer type and IN direction,
- * otherwise it returns false.
- */
-static inline int usb_endpoint_is_bulk_in(
- const struct usb_endpoint_descriptor *epd)
-{
- return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
-}
-
-/**
- * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has bulk transfer type and OUT direction,
- * otherwise it returns false.
- */
-static inline int usb_endpoint_is_bulk_out(
- const struct usb_endpoint_descriptor *epd)
-{
- return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
-}
-
-/**
- * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has interrupt transfer type and IN direction,
- * otherwise it returns false.
- */
-static inline int usb_endpoint_is_int_in(
- const struct usb_endpoint_descriptor *epd)
-{
- return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
-}
-
-/**
- * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has interrupt transfer type and OUT direction,
- * otherwise it returns false.
- */
-static inline int usb_endpoint_is_int_out(
- const struct usb_endpoint_descriptor *epd)
-{
- return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
-}
-
-/**
- * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has isochronous transfer type and IN direction,
- * otherwise it returns false.
- */
-static inline int usb_endpoint_is_isoc_in(
- const struct usb_endpoint_descriptor *epd)
-{
- return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
-}
-
-/**
- * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
- * @epd: endpoint to be checked
- *
- * Returns true if the endpoint has isochronous transfer type and OUT direction,
- * otherwise it returns false.
- */
-static inline int usb_endpoint_is_isoc_out(
- const struct usb_endpoint_descriptor *epd)
-{
- return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
-}
-
-/*-------------------------------------------------------------------------*/
-
#define USB_DEVICE_ID_MATCH_DEVICE \
(USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
#define USB_DEVICE_ID_MATCH_DEV_RANGE \
@@ -1049,6 +888,8 @@ struct usb_driver {
* struct usb_device_driver - identifies USB device driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
+ * @nodename: Callback to provide a naming hint for a possible
+ * device node to create.
* @probe: Called to see if the driver is willing to manage a particular
* device. If it is, probe returns zero and uses dev_set_drvdata()
* to associate driver-specific data with the device. If unwilling
@@ -1092,6 +933,7 @@ extern struct bus_type usb_bus_type;
*/
struct usb_class_driver {
char *name;
+ char *(*nodename)(struct device *dev);
const struct file_operations *fops;
int minor_base;
};
@@ -1221,7 +1063,9 @@ typedef void (*usb_complete_t)(struct urb *);
* @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the
* device driver has provided this DMA address for the setup packet.
* The host controller driver should use this in preference to
- * setup_packet.
+ * setup_packet, but the HCD may choose to ignore the address if it must
+ * copy the setup packet into internal structures. Therefore, setup_packet
+ * must always point to a valid buffer.
* @start_frame: Returns the initial frame for isochronous transfers.
* @number_of_packets: Lists the number of ISO transfer buffers.
* @interval: Specifies the polling interval for interrupt or isochronous
@@ -1357,8 +1201,10 @@ struct urb {
unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/
void *transfer_buffer; /* (in) associated data buffer */
dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
- int transfer_buffer_length; /* (in) data buffer length */
- int actual_length; /* (return) actual transfer length */
+ struct usb_sg_request *sg; /* (in) scatter gather buffer list */
+ int num_sgs; /* (in) number of entries in the sg list */
+ u32 transfer_buffer_length; /* (in) data buffer length */
+ u32 actual_length; /* (return) actual transfer length */
unsigned char *setup_packet; /* (in) setup packet (control only) */
dma_addr_t setup_dma; /* (in) dma addr for setup_packet */
int start_frame; /* (modify) start frame (ISO) */
@@ -1567,6 +1413,7 @@ extern int usb_string(struct usb_device *dev, int index,
extern int usb_clear_halt(struct usb_device *dev, int pipe);
extern int usb_reset_configuration(struct usb_device *dev);
extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
+extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
/* this request isn't really synchronous, but it belongs with the others */
extern int usb_driver_set_configuration(struct usb_device *udev, int config);
@@ -1601,8 +1448,8 @@ struct usb_sg_request {
int status;
size_t bytes;
- /*
- * members below are private: to usbcore,
+ /* private:
+ * members below are private to usbcore,
* and are not provided for driver access!
*/
spinlock_t lock;
@@ -1671,14 +1518,6 @@ void usb_sg_wait(struct usb_sg_request *io);
#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)
-/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
-#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
-#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
-#define usb_settoggle(dev, ep, out, bit) \
- ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
- ((bit) << (ep)))
-
-
static inline unsigned int __create_pipe(struct usb_device *dev,
unsigned int endpoint)
{
@@ -1745,6 +1584,9 @@ extern void usb_unregister_notify(struct notifier_block *nb);
#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
format "\n" , ## arg)
+/* debugfs stuff */
+extern struct dentry *usb_debug_root;
+
#endif /* __KERNEL__ */
#endif
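A sketch of the scatter-gather request object whose private members are annotated above, driven through the existing usb_sg_init()/usb_sg_wait() entry points; the pipe is assumed to be a bulk OUT pipe from usb_sndbulkpipe(), and the scatterlist is assumed to be already set up.

#include <linux/usb.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>

/* Send an already-prepared scatterlist over a bulk OUT pipe and wait. */
static int example_sg_bulk_out(struct usb_device *udev, unsigned int pipe,
			       struct scatterlist *sg, int nents, size_t len)
{
	struct usb_sg_request io;
	int rc;

	rc = usb_sg_init(&io, udev, pipe, 0, sg, nents, len, GFP_KERNEL);
	if (rc)
		return rc;

	usb_sg_wait(&io);	/* blocks until the whole request completes */
	return io.status;
}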
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
index 8cb025fef63..b5744bc218a 100644
--- a/include/linux/usb/audio.h
+++ b/include/linux/usb/audio.h
@@ -24,10 +24,75 @@
#define USB_SUBCLASS_AUDIOCONTROL 0x01
#define USB_SUBCLASS_AUDIOSTREAMING 0x02
#define USB_SUBCLASS_MIDISTREAMING 0x03
+#define USB_SUBCLASS_VENDOR_SPEC 0xff
+/* A.5 Audio Class-Specific AC interface Descriptor Subtypes*/
+#define HEADER 0x01
+#define INPUT_TERMINAL 0x02
+#define OUTPUT_TERMINAL 0x03
+#define MIXER_UNIT 0x04
+#define SELECTOR_UNIT 0x05
+#define FEATURE_UNIT 0x06
+#define PROCESSING_UNIT 0x07
+#define EXTENSION_UNIT 0x08
+
+#define AS_GENERAL 0x01
+#define FORMAT_TYPE 0x02
+#define FORMAT_SPECIFIC 0x03
+
+#define EP_GENERAL 0x01
+
+#define MS_GENERAL 0x01
+#define MIDI_IN_JACK 0x02
+#define MIDI_OUT_JACK 0x03
+
+/* endpoint attributes */
+#define EP_ATTR_MASK 0x0c
+#define EP_ATTR_ASYNC 0x04
+#define EP_ATTR_ADAPTIVE 0x08
+#define EP_ATTR_SYNC 0x0c
+
+/* cs endpoint attributes */
+#define EP_CS_ATTR_SAMPLE_RATE 0x01
+#define EP_CS_ATTR_PITCH_CONTROL 0x02
+#define EP_CS_ATTR_FILL_MAX 0x80
+
+/* Audio Class specific Request Codes */
+#define USB_AUDIO_SET_INTF 0x21
+#define USB_AUDIO_SET_ENDPOINT 0x22
+#define USB_AUDIO_GET_INTF 0xa1
+#define USB_AUDIO_GET_ENDPOINT 0xa2
+
+#define SET_ 0x00
+#define GET_ 0x80
+
+#define _CUR 0x1
+#define _MIN 0x2
+#define _MAX 0x3
+#define _RES 0x4
+#define _MEM 0x5
+
+#define SET_CUR (SET_ | _CUR)
+#define GET_CUR (GET_ | _CUR)
+#define SET_MIN (SET_ | _MIN)
+#define GET_MIN (GET_ | _MIN)
+#define SET_MAX (SET_ | _MAX)
+#define GET_MAX (GET_ | _MAX)
+#define SET_RES (SET_ | _RES)
+#define GET_RES (GET_ | _RES)
+#define SET_MEM (SET_ | _MEM)
+#define GET_MEM (GET_ | _MEM)
+
+#define GET_STAT 0xff
+
+#define USB_AC_TERMINAL_UNDEFINED 0x100
+#define USB_AC_TERMINAL_STREAMING 0x101
+#define USB_AC_TERMINAL_VENDOR_SPEC 0x1FF
+
+/* Terminal Control Selectors */
/* 4.3.2 Class-Specific AC Interface Descriptor */
struct usb_ac_header_descriptor {
- __u8 bLength; /* 8+n */
+ __u8 bLength; /* 8 + n */
__u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
__u8 bDescriptorSubtype; /* USB_MS_HEADER */
__le16 bcdADC; /* 0x0100 */
@@ -36,7 +101,7 @@ struct usb_ac_header_descriptor {
__u8 baInterfaceNr[]; /* [n] */
} __attribute__ ((packed));
-#define USB_DT_AC_HEADER_SIZE(n) (8+(n))
+#define USB_DT_AC_HEADER_SIZE(n) (8 + (n))
/* As above, but more useful for defining your own descriptors: */
#define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) \
@@ -50,4 +115,200 @@ struct usb_ac_header_descriptor_##n { \
__u8 baInterfaceNr[n]; \
} __attribute__ ((packed))
+/* 4.3.2.1 Input Terminal Descriptor */
+struct usb_input_terminal_descriptor {
+ __u8 bLength; /* in bytes: 12 */
+ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */
+ __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */
+ __u8 bTerminalID; /* Constant uniquely identifying the Terminal */
+ __le16 wTerminalType; /* USB Audio Terminal Types */
+ __u8 bAssocTerminal; /* ID of the Output Terminal associated */
+ __u8 bNrChannels; /* Number of logical output channels */
+ __le16 wChannelConfig;
+ __u8 iChannelNames;
+ __u8 iTerminal;
+} __attribute__ ((packed));
+
+#define USB_DT_AC_INPUT_TERMINAL_SIZE 12
+
+#define USB_AC_INPUT_TERMINAL_UNDEFINED 0x200
+#define USB_AC_INPUT_TERMINAL_MICROPHONE 0x201
+#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202
+#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203
+#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204
+#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205
+#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206
+
+/* 4.3.2.2 Output Terminal Descriptor */
+struct usb_output_terminal_descriptor {
+ __u8 bLength; /* in bytes: 9 */
+ __u8 bDescriptorType; /* CS_INTERFACE descriptor type */
+ __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */
+ __u8 bTerminalID; /* Constant uniquely identifying the Terminal */
+ __le16 wTerminalType; /* USB Audio Terminal Types */
+ __u8 bAssocTerminal; /* ID of the Input Terminal associated */
+ __u8 bSourceID; /* ID of the connected Unit or Terminal */
+ __u8 iTerminal;
+} __attribute__ ((packed));
+
+#define USB_DT_AC_OUTPUT_TERMINAL_SIZE 9
+
+#define USB_AC_OUTPUT_TERMINAL_UNDEFINED 0x300
+#define USB_AC_OUTPUT_TERMINAL_SPEAKER 0x301
+#define USB_AC_OUTPUT_TERMINAL_HEADPHONES 0x302
+#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303
+#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304
+#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305
+#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306
+#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307
+
+/* Set bControlSize = 2 as default setting */
+#define USB_DT_AC_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2)
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch) \
+struct usb_ac_feature_unit_descriptor_##ch { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bUnitID; \
+ __u8 bSourceID; \
+ __u8 bControlSize; \
+ __le16 bmaControls[ch + 1]; \
+ __u8 iFeature; \
+} __attribute__ ((packed))
+
+/* 4.5.2 Class-Specific AS Interface Descriptor */
+struct usb_as_header_descriptor {
+ __u8 bLength; /* in bytes: 7 */
+ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
+ __u8 bDescriptorSubtype; /* AS_GENERAL */
+ __u8 bTerminalLink; /* Terminal ID of connected Terminal */
+ __u8 bDelay; /* Delay introduced by the data path */
+ __le16 wFormatTag; /* The Audio Data Format */
+} __attribute__ ((packed));
+
+#define USB_DT_AS_HEADER_SIZE 7
+
+#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED 0x0
+#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM 0x1
+#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8 0x2
+#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT 0x3
+#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW 0x4
+#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW 0x5
+
+struct usb_as_format_type_i_continuous_descriptor {
+ __u8 bLength; /* in bytes: 8 + (ns * 3) */
+ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
+ __u8 bDescriptorSubtype; /* FORMAT_TYPE */
+ __u8 bFormatType; /* FORMAT_TYPE_1 */
+ __u8 bNrChannels; /* physical channels in the stream */
+ __u8 bSubframeSize; /* */
+ __u8 bBitResolution;
+ __u8 bSamFreqType;
+ __u8 tLowerSamFreq[3];
+ __u8 tUpperSamFreq[3];
+} __attribute__ ((packed));
+
+#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14
+
+struct usb_as_formate_type_i_discrete_descriptor {
+ __u8 bLength; /* in bytes: 8 + (ns * 3) */
+ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
+ __u8 bDescriptorSubtype; /* FORMAT_TYPE */
+ __u8 bFormatType; /* FORMAT_TYPE_1 */
+ __u8 bNrChannels; /* physical channels in the stream */
+ __u8 bSubframeSize; /* */
+ __u8 bBitResolution;
+ __u8 bSamFreqType;
+ __u8 tSamFreq[][3];
+} __attribute__ ((packed));
+
+#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n) \
+struct usb_as_formate_type_i_discrete_descriptor_##n { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bFormatType; \
+ __u8 bNrChannels; \
+ __u8 bSubframeSize; \
+ __u8 bBitResolution; \
+ __u8 bSamFreqType; \
+ __u8 tSamFreq[n][3]; \
+} __attribute__ ((packed))
+
+#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3))
+
+#define USB_AS_FORMAT_TYPE_UNDEFINED 0x0
+#define USB_AS_FORMAT_TYPE_I 0x1
+#define USB_AS_FORMAT_TYPE_II 0x2
+#define USB_AS_FORMAT_TYPE_III 0x3
+
+#define USB_AS_ENDPOINT_ASYNC (1 << 2)
+#define USB_AS_ENDPOINT_ADAPTIVE (2 << 2)
+#define USB_AS_ENDPOINT_SYNC (3 << 2)
+
+struct usb_as_iso_endpoint_descriptor {
+ __u8 bLength; /* in bytes: 7 */
+ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */
+ __u8 bDescriptorSubtype; /* EP_GENERAL */
+ __u8 bmAttributes;
+ __u8 bLockDelayUnits;
+ __le16 wLockDelay;
+};
+#define USB_AS_ISO_ENDPOINT_DESC_SIZE 7
+
+#define FU_CONTROL_UNDEFINED 0x00
+#define MUTE_CONTROL 0x01
+#define VOLUME_CONTROL 0x02
+#define BASS_CONTROL 0x03
+#define MID_CONTROL 0x04
+#define TREBLE_CONTROL 0x05
+#define GRAPHIC_EQUALIZER_CONTROL 0x06
+#define AUTOMATIC_GAIN_CONTROL 0x07
+#define DELAY_CONTROL 0x08
+#define BASS_BOOST_CONTROL 0x09
+#define LOUDNESS_CONTROL 0x0a
+
+#define FU_MUTE (1 << (MUTE_CONTROL - 1))
+#define FU_VOLUME (1 << (VOLUME_CONTROL - 1))
+#define FU_BASS (1 << (BASS_CONTROL - 1))
+#define FU_MID (1 << (MID_CONTROL - 1))
+#define FU_TREBLE (1 << (TREBLE_CONTROL - 1))
+#define FU_GRAPHIC_EQ (1 << (GRAPHIC_EQUALIZER_CONTROL - 1))
+#define FU_AUTO_GAIN (1 << (AUTOMATIC_GAIN_CONTROL - 1))
+#define FU_DELAY (1 << (DELAY_CONTROL - 1))
+#define FU_BASS_BOOST (1 << (BASS_BOOST_CONTROL - 1))
+#define FU_LOUDNESS (1 << (LOUDNESS_CONTROL - 1))
+
+struct usb_audio_control {
+ struct list_head list;
+ const char *name;
+ u8 type;
+ int data[5];
+ int (*set)(struct usb_audio_control *con, u8 cmd, int value);
+ int (*get)(struct usb_audio_control *con, u8 cmd);
+};
+
+static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
+{
+ con->data[cmd] = value;
+
+ return 0;
+}
+
+static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
+{
+ return con->data[cmd];
+}
+
+struct usb_audio_control_selector {
+ struct list_head list;
+ struct list_head control;
+ u8 id;
+ const char *name;
+ u8 type;
+ struct usb_descriptor_header *desc;
+};
+
#endif /* __LINUX_USB_AUDIO_H */
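A sketch of wiring one of the new feature-unit controls to the generic_set_cmd()/generic_get_cmd() helpers defined above, roughly as a gadget audio function might; the unit ID and names are illustrative.

#include <linux/list.h>
#include <linux/usb/audio.h>

static struct usb_audio_control mute_control = {
	.list	= LIST_HEAD_INIT(mute_control.list),
	.name	= "Mute Control",
	.type	= MUTE_CONTROL,
	/* Store SET_CUR/GET_CUR values in the generic data[] backing store. */
	.set	= generic_set_cmd,
	.get	= generic_get_cmd,
};

static struct usb_audio_control_selector feature_unit = {
	.list		= LIST_HEAD_INIT(feature_unit.list),
	.control	= LIST_HEAD_INIT(feature_unit.control),
	.id		= 6,	/* assumed unit ID */
	.name		= "Feature Unit",
	.type		= FEATURE_UNIT,
};

/* At bind time: list_add(&mute_control.list, &feature_unit.control); */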
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index 3c86ed25a04..c24124a42ce 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -17,6 +17,7 @@
#define USB_CDC_SUBCLASS_DMM 0x09
#define USB_CDC_SUBCLASS_MDLM 0x0a
#define USB_CDC_SUBCLASS_OBEX 0x0b
+#define USB_CDC_SUBCLASS_EEM 0x0c
#define USB_CDC_PROTO_NONE 0
@@ -28,6 +29,8 @@
#define USB_CDC_ACM_PROTO_AT_CDMA 6
#define USB_CDC_ACM_PROTO_VENDOR 0xff
+#define USB_CDC_PROTO_EEM 7
+
/*-------------------------------------------------------------------------*/
/*
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 9b42baed390..93223638f70 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -102,7 +102,7 @@
#define USB_REQ_LOOPBACK_DATA_READ 0x16
#define USB_REQ_SET_INTERFACE_DS 0x17
-/* The Link Power Mangement (LPM) ECN defines USB_REQ_TEST_AND_SET command,
+/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command,
* used by hubs to put ports into a new L1 suspend state, except that it
* forgot to define its number ...
*/
@@ -191,6 +191,8 @@ struct usb_ctrlrequest {
#define USB_DT_WIRE_ADAPTER 0x21
#define USB_DT_RPIPE 0x22
#define USB_DT_CS_RADIO_CONTROL 0x23
+/* From the USB 3.0 spec */
+#define USB_DT_SS_ENDPOINT_COMP 0x30
/* Conventional codes for class-specific descriptors. The convention is
* defined in the USB "Common Class" Spec (3.11). Individual class specs
@@ -353,6 +355,199 @@ struct usb_endpoint_descriptor {
#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
+/*-------------------------------------------------------------------------*/
+
+/**
+ * usb_endpoint_num - get the endpoint's number
+ * @epd: endpoint to be checked
+ *
+ * Returns @epd's number: 0 to 15.
+ */
+static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+}
+
+/**
+ * usb_endpoint_type - get the endpoint's transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
+ * to @epd's transfer type.
+ */
+static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+}
+
+/**
+ * usb_endpoint_dir_in - check if the endpoint has IN direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type IN, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
+}
+
+/**
+ * usb_endpoint_dir_out - check if the endpoint has OUT direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type OUT, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+}
+
+/**
+ * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type bulk, otherwise it returns false.
+ */
+static inline int usb_endpoint_xfer_bulk(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK);
+}
+
+/**
+ * usb_endpoint_xfer_control - check if the endpoint has control transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type control, otherwise it returns false.
+ */
+static inline int usb_endpoint_xfer_control(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_CONTROL);
+}
+
+/**
+ * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type interrupt, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_int(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT);
+}
+
+/**
+ * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type isochronous, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_isoc(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_ISOC);
+}
+
+/**
+ * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_bulk_in(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_bulk_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
+}
+
+/**
+ * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_int_in(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_int_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
+}
+
+/**
+ * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_isoc_in(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_isoc_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
+struct usb_ss_ep_comp_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+
+ __u8 bMaxBurst;
+ __u8 bmAttributes;
+ __u16 wBytesPerInterval;
+} __attribute__ ((packed));
+
+#define USB_DT_SS_EP_COMP_SIZE 6
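The endpoint-classification helpers above are relocated here from usb.h; as a reminder of the usual driver idiom, a sketch that walks an interface's current altsetting to find its bulk endpoints (the interface layout is an assumption).

#include <linux/usb.h>
#include <linux/errno.h>

/* Find the first bulk IN and bulk OUT endpoints of the current altsetting. */
static int example_find_bulk_endpoints(struct usb_interface *intf,
				       struct usb_endpoint_descriptor **in,
				       struct usb_endpoint_descriptor **out)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	int i;

	*in = *out = NULL;
	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

		if (!*in && usb_endpoint_is_bulk_in(epd))
			*in = epd;
		else if (!*out && usb_endpoint_is_bulk_out(epd))
			*out = epd;
	}
	return (*in && *out) ? 0 : -ENODEV;
}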
/*-------------------------------------------------------------------------*/
@@ -573,6 +768,7 @@ enum usb_device_speed {
USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
USB_SPEED_HIGH, /* usb 2.0 */
USB_SPEED_VARIABLE, /* wireless (usb 2.5) */
+ USB_SPEED_SUPER, /* usb 3.0 */
};
enum usb_device_state {
@@ -584,8 +780,8 @@ enum usb_device_state {
/* chapter 9 and authentication (wireless) device states */
USB_STATE_ATTACHED,
USB_STATE_POWERED, /* wired */
- USB_STATE_UNAUTHENTICATED, /* auth */
USB_STATE_RECONNECTING, /* auth */
+ USB_STATE_UNAUTHENTICATED, /* auth */
USB_STATE_DEFAULT, /* limited function */
USB_STATE_ADDRESS,
USB_STATE_CONFIGURED, /* most functions */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 935c380ffe4..4f6bb3d2160 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -124,6 +124,7 @@ struct usb_function {
void (*suspend)(struct usb_function *);
void (*resume)(struct usb_function *);
+ /* private: */
/* internals */
struct list_head list;
};
@@ -219,6 +220,7 @@ struct usb_configuration {
struct usb_composite_dev *cdev;
+ /* private: */
/* internals */
struct list_head list;
struct list_head functions;
@@ -244,6 +246,10 @@ int usb_add_config(struct usb_composite_dev *,
* value; it should return zero on successful initialization.
* @unbind: Reverses @bind(); called as a side effect of unregistering
* this driver.
+ * @suspend: Notifies the composite driver when the host stops sending USB
+ * traffic, after the per-function notifications
+ * @resume: Notifies the composite driver when the host restarts USB
+ * traffic, before the per-function notifications
*
* Devices default to reporting self powered operation. Devices which rely
* on bus powered operation should report this in their @bind() method.
@@ -268,6 +274,10 @@ struct usb_composite_driver {
int (*bind)(struct usb_composite_dev *);
int (*unbind)(struct usb_composite_dev *);
+
+ /* global suspend hooks */
+ void (*suspend)(struct usb_composite_dev *);
+ void (*resume)(struct usb_composite_dev *);
};
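Purely as a hedged illustration of the new hooks (names and bodies are hypothetical, and required fields such as .dev and .strings are omitted): a composite driver wiring up the whole-device suspend/resume callbacks.

static int my_bind(struct usb_composite_dev *cdev)   { return 0; }
static int my_unbind(struct usb_composite_dev *cdev) { return 0; }

static void my_suspend(struct usb_composite_dev *cdev)
{
	/* runs once, after each function's ->suspend() has been called */
}

static void my_resume(struct usb_composite_dev *cdev)
{
	/* runs once, before each function's ->resume() is called */
}

static struct usb_composite_driver my_composite = {
	.name    = "my_composite",
	.bind    = my_bind,
	.unbind  = my_unbind,
	.suspend = my_suspend,
	.resume  = my_resume,
};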
extern int usb_composite_register(struct usb_composite_driver *);
@@ -313,6 +323,7 @@ struct usb_composite_dev {
struct usb_configuration *config;
+ /* private: */
/* internals */
struct usb_device_descriptor desc;
struct list_head configs;
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 0460a746480..bbf45d500b6 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -598,6 +598,7 @@ static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
/**
* usb_gadget_vbus_connect - Notify controller that VBUS is powered
* @gadget:The device which now has VBUS power.
+ * Context: can sleep
*
* This call is used by a driver for an external transceiver (or GPIO)
* that detects a VBUS power session starting. Common responses include
@@ -636,6 +637,7 @@ static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
/**
* usb_gadget_vbus_disconnect - notify controller about VBUS session end
* @gadget:the device whose VBUS supply is being described
+ * Context: can sleep
*
* This call is used by a driver for an external transceiver (or GPIO)
* that detects a VBUS power session ending. Common responses include
@@ -792,19 +794,20 @@ struct usb_gadget_driver {
/**
* usb_gadget_register_driver - register a gadget driver
* @driver:the driver being registered
+ * Context: can sleep
*
* Call this in your gadget driver's module initialization function,
* to tell the underlying usb controller driver about your driver.
* The driver's bind() function will be called to bind it to a
* gadget before this registration call returns. It's expected that
* the bind() functions will be in init sections.
- * This function must be called in a context that can sleep.
*/
int usb_gadget_register_driver(struct usb_gadget_driver *driver);
/**
* usb_gadget_unregister_driver - unregister a gadget driver
* @driver:the driver being unregistered
+ * Context: can sleep
*
* Call this in your gadget driver's module cleanup function,
* to tell the underlying usb controller that your driver is
@@ -813,7 +816,6 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver);
* to unbind() and clean up any device state, before this procedure
* finally returns. It's expected that the unbind() functions
* will be in exit sections, so may not be linked in some kernels.
- * This function must be called in a context that can sleep.
*/
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver);
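A minimal usage sketch (assuming a gadget driver object named my_gadget_driver, which is hypothetical): register in module init, unregister in module exit, both from sleepable context.

static int __init my_gadget_init(void)
{
	/* binds my_gadget_driver to a controller before returning; may sleep */
	return usb_gadget_register_driver(&my_gadget_driver);
}
module_init(my_gadget_init);

static void __exit my_gadget_exit(void)
{
	/* unbinds and cleans up; may sleep */
	usb_gadget_unregister_driver(&my_gadget_driver);
}
module_exit(my_gadget_exit);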
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h
new file mode 100644
index 00000000000..e115ae6df1d
--- /dev/null
+++ b/include/linux/usb/langwell_otg.h
@@ -0,0 +1,177 @@
+/*
+ * Intel Langwell USB OTG transceiver driver
+ * Copyright (C) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __LANGWELL_OTG_H__
+#define __LANGWELL_OTG_H__
+
+/* notify transceiver driver about OTG events */
+extern void langwell_update_transceiver(void);
+/* HCD register bus driver */
+extern int langwell_register_host(struct pci_driver *host_driver);
+/* HCD unregister bus driver */
+extern void langwell_unregister_host(struct pci_driver *host_driver);
+/* DCD register bus driver */
+extern int langwell_register_peripheral(struct pci_driver *client_driver);
+/* DCD unregister bus driver */
+extern void langwell_unregister_peripheral(struct pci_driver *client_driver);
+/* No silent failure, output warning message */
+extern void langwell_otg_nsf_msg(unsigned long message);
+
+#define CI_USBCMD 0x30
+# define USBCMD_RST BIT(1)
+# define USBCMD_RS BIT(0)
+#define CI_USBSTS 0x34
+# define USBSTS_SLI BIT(8)
+# define USBSTS_URI BIT(6)
+# define USBSTS_PCI BIT(2)
+#define CI_PORTSC1 0x74
+# define PORTSC_PP BIT(12)
+# define PORTSC_LS (BIT(11) | BIT(10))
+# define PORTSC_SUSP BIT(7)
+# define PORTSC_CCS BIT(0)
+#define CI_HOSTPC1 0xb4
+# define HOSTPC1_PHCD BIT(22)
+#define CI_OTGSC 0xf4
+# define OTGSC_DPIE BIT(30)
+# define OTGSC_1MSE BIT(29)
+# define OTGSC_BSEIE BIT(28)
+# define OTGSC_BSVIE BIT(27)
+# define OTGSC_ASVIE BIT(26)
+# define OTGSC_AVVIE BIT(25)
+# define OTGSC_IDIE BIT(24)
+# define OTGSC_DPIS BIT(22)
+# define OTGSC_1MSS BIT(21)
+# define OTGSC_BSEIS BIT(20)
+# define OTGSC_BSVIS BIT(19)
+# define OTGSC_ASVIS BIT(18)
+# define OTGSC_AVVIS BIT(17)
+# define OTGSC_IDIS BIT(16)
+# define OTGSC_DPS BIT(14)
+# define OTGSC_1MST BIT(13)
+# define OTGSC_BSE BIT(12)
+# define OTGSC_BSV BIT(11)
+# define OTGSC_ASV BIT(10)
+# define OTGSC_AVV BIT(9)
+# define OTGSC_ID BIT(8)
+# define OTGSC_HABA BIT(7)
+# define OTGSC_HADP BIT(6)
+# define OTGSC_IDPU BIT(5)
+# define OTGSC_DP BIT(4)
+# define OTGSC_OT BIT(3)
+# define OTGSC_HAAR BIT(2)
+# define OTGSC_VC BIT(1)
+# define OTGSC_VD BIT(0)
+# define OTGSC_INTEN_MASK (0x7f << 24)
+# define OTGSC_INTSTS_MASK (0x7f << 16)
+#define CI_USBMODE 0xf8
+# define USBMODE_CM (BIT(1) | BIT(0))
+# define USBMODE_IDLE 0
+# define USBMODE_DEVICE 0x2
+# define USBMODE_HOST 0x3
+
+#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
+
+struct otg_hsm {
+ /* Input */
+ int a_bus_resume;
+ int a_bus_suspend;
+ int a_conn;
+ int a_sess_vld;
+ int a_srp_det;
+ int a_vbus_vld;
+ int b_bus_resume;
+ int b_bus_suspend;
+ int b_conn;
+ int b_se0_srp;
+ int b_sess_end;
+ int b_sess_vld;
+ int id;
+
+ /* Internal variables */
+ int a_set_b_hnp_en;
+ int b_srp_done;
+ int b_hnp_enable;
+
+ /* Timeout indicator for timers */
+ int a_wait_vrise_tmout;
+ int a_wait_bcon_tmout;
+ int a_aidl_bdis_tmout;
+ int b_ase0_brst_tmout;
+ int b_bus_suspend_tmout;
+ int b_srp_res_tmout;
+
+ /* Informative variables */
+ int a_bus_drop;
+ int a_bus_req;
+ int a_clr_err;
+ int a_suspend_req;
+ int b_bus_req;
+
+ /* Output */
+ int drv_vbus;
+ int loc_conn;
+ int loc_sof;
+
+ /* Others */
+ int b_bus_suspend_vld;
+};
+
+#define TA_WAIT_VRISE 100
+#define TA_WAIT_BCON 30000
+#define TA_AIDL_BDIS 15000
+#define TB_ASE0_BRST 5000
+#define TB_SE0_SRP 2
+#define TB_SRP_RES 100
+#define TB_BUS_SUSPEND 500
+
+struct langwell_otg_timer {
+ unsigned long expires; /* Number of count increments until timeout */
+ unsigned long count; /* Tick counter */
+ void (*function)(unsigned long); /* Timeout function */
+ unsigned long data; /* Data passed to function */
+ struct list_head list;
+};
+
+struct langwell_otg {
+ struct otg_transceiver otg;
+ struct otg_hsm hsm;
+ void __iomem *regs;
+ unsigned region;
+ struct pci_driver *host_ops;
+ struct pci_driver *client_ops;
+ struct pci_dev *pdev;
+ struct work_struct work;
+ struct workqueue_struct *qwork;
+ spinlock_t lock;
+ spinlock_t wq_lock;
+};
+
+static inline struct langwell_otg *otg_to_langwell(struct otg_transceiver *otg)
+{
+ return container_of(otg, struct langwell_otg, otg);
+}
+
+#ifdef DEBUG
+#define otg_dbg(fmt, args...) \
+ printk(KERN_DEBUG fmt , ## args)
+#else
+#define otg_dbg(fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+#endif /* __LANGWELL_OTG_H__ */
diff --git a/include/linux/usb/langwell_udc.h b/include/linux/usb/langwell_udc.h
new file mode 100644
index 00000000000..c949178a653
--- /dev/null
+++ b/include/linux/usb/langwell_udc.h
@@ -0,0 +1,310 @@
+/*
+ * Intel Langwell USB Device Controller driver
+ * Copyright (C) 2008-2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __LANGWELL_UDC_H
+#define __LANGWELL_UDC_H
+
+
+/* MACRO defines */
+#define CAP_REG_OFFSET 0x0
+#define OP_REG_OFFSET 0x28
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#define DQH_ALIGNMENT 2048
+#define DTD_ALIGNMENT 64
+#define DMA_BOUNDARY 4096
+
+#define EP0_MAX_PKT_SIZE 64
+#define EP_DIR_IN 1
+#define EP_DIR_OUT 0
+
+#define FLUSH_TIMEOUT 1000
+#define RESET_TIMEOUT 1000
+#define SETUPSTAT_TIMEOUT 100
+#define PRIME_TIMEOUT 100
+
+
+/* device memory space registers */
+
+/* Capability Registers, BAR0 + CAP_REG_OFFSET */
+struct langwell_cap_regs {
+ /* offset: 0x0 */
+ u8 caplength; /* offset of Operational Register */
+ u8 _reserved3;
+ u16 hciversion; /* H: BCD encoding of host version */
+ u32 hcsparams; /* H: host port steering logic capability */
+ u32 hccparams; /* H: host multiple mode control capability */
+#define HCC_LEN BIT(17) /* Link power management (LPM) capability */
+ u8 _reserved4[0x20-0xc];
+ /* offset: 0x20 */
+ u16 dciversion; /* BCD encoding of device version */
+ u8 _reserved5[0x24-0x22];
+ u32 dccparams; /* overall device controller capability */
+#define HOSTCAP BIT(8) /* host capable */
+#define DEVCAP BIT(7) /* device capable */
+#define DEN(d) \
+ (((d)>>0)&0x1f) /* bits 4:0, device endpoint number */
+} __attribute__ ((packed));
+
+
+/* Operational Registers, BAR0 + OP_REG_OFFSET */
+struct langwell_op_regs {
+ /* offset: 0x28 */
+ u32 extsts;
+#define EXTS_TI1 BIT(4) /* general purpose timer interrupt 1 */
+#define EXTS_TI1TI0 BIT(3) /* general purpose timer interrupt 0 */
+#define EXTS_TI1UPI BIT(2) /* USB host periodic interrupt */
+#define EXTS_TI1UAI BIT(1) /* USB host asynchronous interrupt */
+#define EXTS_TI1NAKI BIT(0) /* NAK interrupt */
+ u32 extintr;
+#define EXTI_TIE1 BIT(4) /* general purpose timer interrupt enable 1 */
+#define EXTI_TIE0 BIT(3) /* general purpose timer interrupt enable 0 */
+#define EXTI_UPIE BIT(2) /* USB host periodic interrupt enable */
+#define EXTI_UAIE BIT(1) /* USB host asynchronous interrupt enable */
+#define EXTI_NAKE BIT(0) /* NAK interrupt enable */
+ /* offset: 0x30 */
+ u32 usbcmd;
+#define CMD_HIRD(u) \
+ (((u)>>24)&0xf) /* bits 27:24, host init resume duration */
+#define CMD_ITC(u) \
+ (((u)>>16)&0xff) /* bits 23:16, interrupt threshold control */
+#define CMD_PPE BIT(15) /* per-port change events enable */
+#define CMD_ATDTW BIT(14) /* add dTD tripwire */
+#define CMD_SUTW BIT(13) /* setup tripwire */
+#define CMD_ASPE BIT(11) /* asynchronous schedule park mode enable */
+#define CMD_FS2 BIT(10) /* frame list size */
+#define CMD_ASP1 BIT(9) /* asynchronous schedule park mode count */
+#define CMD_ASP0 BIT(8)
+#define CMD_LR BIT(7) /* light host/device controller reset */
+#define CMD_IAA BIT(6) /* interrupt on async advance doorbell */
+#define CMD_ASE BIT(5) /* asynchronous schedule enable */
+#define CMD_PSE BIT(4) /* periodic schedule enable */
+#define CMD_FS1 BIT(3)
+#define CMD_FS0 BIT(2)
+#define CMD_RST BIT(1) /* controller reset */
+#define CMD_RUNSTOP BIT(0) /* run/stop */
+ u32 usbsts;
+#define STS_PPCI(u) \
+ (((u)>>16)&0xffff) /* bits 31:16, port-n change detect */
+#define STS_AS BIT(15) /* asynchronous schedule status */
+#define STS_PS BIT(14) /* periodic schedule status */
+#define STS_RCL BIT(13) /* reclamation */
+#define STS_HCH BIT(12) /* HC halted */
+#define STS_ULPII BIT(10) /* ULPI interrupt */
+#define STS_SLI BIT(8) /* DC suspend */
+#define STS_SRI BIT(7) /* SOF received */
+#define STS_URI BIT(6) /* USB reset received */
+#define STS_AAI BIT(5) /* interrupt on async advance */
+#define STS_SEI BIT(4) /* system error */
+#define STS_FRI BIT(3) /* frame list rollover */
+#define STS_PCI BIT(2) /* port change detect */
+#define STS_UEI BIT(1) /* USB error interrupt */
+#define STS_UI BIT(0) /* USB interrupt */
+ u32 usbintr;
+/* bits 31:16, per-port interrupt enable */
+#define INTR_PPCE(u) (((u)>>16)&0xffff)
+#define INTR_ULPIE BIT(10) /* ULPI enable */
+#define INTR_SLE BIT(8) /* DC sleep/suspend enable */
+#define INTR_SRE BIT(7) /* SOF received enable */
+#define INTR_URE BIT(6) /* USB reset enable */
+#define INTR_AAE BIT(5) /* interrupt on async advance enable */
+#define INTR_SEE BIT(4) /* system error enable */
+#define INTR_FRE BIT(3) /* frame list rollover enable */
+#define INTR_PCE BIT(2) /* port change detect enable */
+#define INTR_UEE BIT(1) /* USB error interrupt enable */
+#define INTR_UE BIT(0) /* USB interrupt enable */
+ u32 frindex; /* frame index */
+#define FRINDEX_MASK (0x3fff << 0)
+ u32 ctrldssegment; /* not used */
+ u32 deviceaddr;
+#define USBADR_SHIFT 25
+#define USBADR(d) \
+ (((d)>>25)&0x7f) /* bits 31:25, device address */
+#define USBADR_MASK (0x7f << 25)
+#define USBADRA BIT(24) /* device address advance */
+ u32 endpointlistaddr;/* endpoint list top memory address */
+/* bits 31:11, endpoint list pointer */
+#define EPBASE(d) (((d)>>11)&0x1fffff)
+#define ENDPOINTLISTADDR_MASK (0x1fffff << 11)
+ u32 ttctrl; /* H: TT operation, not used */
+ /* offset: 0x50 */
+ u32 burstsize; /* burst size of data movement */
+#define TXPBURST(b) \
+ (((b)>>8)&0xff) /* bits 15:8, TX burst length */
+#define RXPBURST(b) \
+ (((b)>>0)&0xff) /* bits 7:0, RX burst length */
+ u32 txfilltuning; /* TX tuning */
+ u32 txttfilltuning; /* H: TX TT tuning */
+ u32 ic_usb; /* control the IC_USB FS/LS transceiver */
+ /* offset: 0x60 */
+ u32 ulpi_viewport; /* indirect access to ULPI PHY */
+#define ULPIWU BIT(31) /* ULPI wakeup */
+#define ULPIRUN BIT(30) /* ULPI read/write run */
+#define ULPIRW BIT(29) /* ULPI read/write control */
+#define ULPISS BIT(27) /* ULPI sync state */
+#define ULPIPORT(u) \
+ (((u)>>24)&7) /* bits 26:24, ULPI port number */
+#define ULPIADDR(u) \
+ (((u)>>16)&0xff) /* bits 23:16, ULPI data address */
+#define ULPIDATRD(u) \
+ (((u)>>8)&0xff) /* bits 15:8, ULPI data read */
+#define ULPIDATWR(u) \
+ (((u)>>0)&0xff) /* bits 7:0, ULPI data write */
+ u8 _reserved6[0x70-0x64];
+ /* offset: 0x70 */
+ u32 configflag; /* H: not used */
+ u32 portsc1; /* port status */
+#define DA(p) \
+ (((p)>>25)&0x7f) /* bits 31:25, device address */
+#define PORTS_SSTS (BIT(24) | BIT(23)) /* suspend status */
+#define PORTS_WKOC BIT(22) /* wake on over-current enable */
+#define PORTS_WKDS BIT(21) /* wake on disconnect enable */
+#define PORTS_WKCN BIT(20) /* wake on connect enable */
+#define PORTS_PTC(p) (((p)>>16)&0xf) /* bits 19:16, port test control */
+#define PORTS_PIC (BIT(15) | BIT(14)) /* port indicator control */
+#define PORTS_PO BIT(13) /* port owner */
+#define PORTS_PP BIT(12) /* port power */
+#define PORTS_LS (BIT(11) | BIT(10)) /* line status */
+#define PORTS_SLP BIT(9) /* suspend using L1 */
+#define PORTS_PR BIT(8) /* port reset */
+#define PORTS_SUSP BIT(7) /* suspend */
+#define PORTS_FPR BIT(6) /* force port resume */
+#define PORTS_OCC BIT(5) /* over-current change */
+#define PORTS_OCA BIT(4) /* over-current active */
+#define PORTS_PEC BIT(3) /* port enable/disable change */
+#define PORTS_PE BIT(2) /* port enable/disable */
+#define PORTS_CSC BIT(1) /* connect status change */
+#define PORTS_CCS BIT(0) /* current connect status */
+ u8 _reserved7[0xb4-0x78];
+ /* offset: 0xb4 */
+ u32 devlc; /* control LPM and each USB port behavior */
+/* bits 31:29, parallel transceiver select */
+#define LPM_PTS(d) (((d)>>29)&7)
+#define LPM_STS BIT(28) /* serial transceiver select */
+#define LPM_PTW BIT(27) /* parallel transceiver width */
+#define LPM_PSPD(d) (((d)>>25)&3) /* bits 26:25, port speed */
+#define LPM_PSPD_MASK (BIT(26) | BIT(25))
+#define LPM_SPEED_FULL 0
+#define LPM_SPEED_LOW 1
+#define LPM_SPEED_HIGH 2
+#define LPM_SRT BIT(24) /* shorten reset time */
+#define LPM_PFSC BIT(23) /* port force full speed connect */
+#define LPM_PHCD BIT(22) /* PHY low power suspend clock disable */
+#define LPM_STL BIT(16) /* STALL reply to LPM token */
+#define LPM_BA(d) \
+ (((d)>>1)&0x7ff) /* bits 11:1, BmAttributes */
+#define LPM_NYT_ACK BIT(0) /* NYET/ACK reply to LPM token */
+ u8 _reserved8[0xf4-0xb8];
+ /* offset: 0xf4 */
+ u32 otgsc; /* On-The-Go status and control */
+#define OTGSC_DPIE BIT(30) /* data pulse interrupt enable */
+#define OTGSC_MSE BIT(29) /* 1 ms timer interrupt enable */
+#define OTGSC_BSEIE BIT(28) /* B session end interrupt enable */
+#define OTGSC_BSVIE BIT(27) /* B session valid interrupt enable */
+#define OTGSC_ASVIE BIT(26) /* A session valid interrupt enable */
+#define OTGSC_AVVIE BIT(25) /* A VBUS valid interrupt enable */
+#define OTGSC_IDIE BIT(24) /* USB ID interrupt enable */
+#define OTGSC_DPIS BIT(22) /* data pulse interrupt status */
+#define OTGSC_MSS BIT(21) /* 1 ms timer interrupt status */
+#define OTGSC_BSEIS BIT(20) /* B session end interrupt status */
+#define OTGSC_BSVIS BIT(19) /* B session valid interrupt status */
+#define OTGSC_ASVIS BIT(18) /* A session valid interrupt status */
+#define OTGSC_AVVIS BIT(17) /* A VBUS valid interrupt status */
+#define OTGSC_IDIS BIT(16) /* USB ID interrupt status */
+#define OTGSC_DPS BIT(14) /* data bus pulsing status */
+#define OTGSC_MST BIT(13) /* 1 ms timer toggle */
+#define OTGSC_BSE BIT(12) /* B session end */
+#define OTGSC_BSV BIT(11) /* B session valid */
+#define OTGSC_ASV BIT(10) /* A session valid */
+#define OTGSC_AVV BIT(9) /* A VBUS valid */
+#define OTGSC_USBID BIT(8) /* USB ID */
+#define OTGSC_HABA BIT(7) /* hw assist B-disconnect to A-connect */
+#define OTGSC_HADP BIT(6) /* hw assist data pulse */
+#define OTGSC_IDPU BIT(5) /* ID pullup */
+#define OTGSC_DP BIT(4) /* data pulsing */
+#define OTGSC_OT BIT(3) /* OTG termination */
+#define OTGSC_HAAR BIT(2) /* hw assist auto reset */
+#define OTGSC_VC BIT(1) /* VBUS charge */
+#define OTGSC_VD BIT(0) /* VBUS discharge */
+ u32 usbmode;
+#define MODE_VBPS BIT(5) /* R/W VBUS power select */
+#define MODE_SDIS BIT(4) /* R/W stream disable mode */
+#define MODE_SLOM BIT(3) /* R/W setup lockout mode */
+#define MODE_ENSE BIT(2) /* endian select */
+#define MODE_CM(u) (((u)>>0)&3) /* bits 1:0, controller mode */
+#define MODE_IDLE 0
+#define MODE_DEVICE 2
+#define MODE_HOST 3
+ u8 _reserved9[0x100-0xfc];
+ /* offset: 0x100 */
+ u32 endptnak;
+#define EPTN(e) \
+ (((e)>>16)&0xffff) /* bits 31:16, TX endpoint NAK */
+#define EPRN(e) \
+ (((e)>>0)&0xffff) /* bits 15:0, RX endpoint NAK */
+ u32 endptnaken;
+#define EPTNE(e) \
+ (((e)>>16)&0xffff) /* bits 31:16, TX endpoint NAK enable */
+#define EPRNE(e) \
+ (((e)>>0)&0xffff) /* bits 15:0, RX endpoint NAK enable */
+ u32 endptsetupstat;
+#define SETUPSTAT_MASK (0xffff << 0) /* bits 15:0 */
+#define EP0SETUPSTAT_MASK 1
+ u32 endptprime;
+/* bits 31:16, prime endpoint transmit buffer */
+#define PETB(e) (((e)>>16)&0xffff)
+/* bits 15:0, prime endpoint receive buffer */
+#define PERB(e) (((e)>>0)&0xffff)
+ /* offset: 0x110 */
+ u32 endptflush;
+/* bits 31:16, flush endpoint transmit buffer */
+#define FETB(e) (((e)>>16)&0xffff)
+/* bits 15:0, flush endpoint receive buffer */
+#define FERB(e) (((e)>>0)&0xffff)
+ u32 endptstat;
+/* bits 31:16, endpoint transmit buffer ready */
+#define ETBR(e) (((e)>>16)&0xffff)
+/* bits 15:0, endpoint receive buffer ready */
+#define ERBR(e) (((e)>>0)&0xffff)
+ u32 endptcomplete;
+/* bits 31:16, endpoint transmit complete event */
+#define ETCE(e) (((e)>>16)&0xffff)
+/* bits 15:0, endpoint receive complete event */
+#define ERCE(e) (((e)>>0)&0xffff)
+ /* offset: 0x11c */
+ u32 endptctrl[16];
+#define EPCTRL_TXE BIT(23) /* TX endpoint enable */
+#define EPCTRL_TXR BIT(22) /* TX data toggle reset */
+#define EPCTRL_TXI BIT(21) /* TX data toggle inhibit */
+#define EPCTRL_TXT(e) (((e)>>18)&3) /* bits 19:18, TX endpoint type */
+#define EPCTRL_TXT_SHIFT 18
+#define EPCTRL_TXD BIT(17) /* TX endpoint data source */
+#define EPCTRL_TXS BIT(16) /* TX endpoint STALL */
+#define EPCTRL_RXE BIT(7) /* RX endpoint enable */
+#define EPCTRL_RXR BIT(6) /* RX data toggle reset */
+#define EPCTRL_RXI BIT(5) /* RX data toggle inhibit */
+#define EPCTRL_RXT(e) (((e)>>2)&3) /* bits 3:2, RX endpoint type */
+#define EPCTRL_RXT_SHIFT 2 /* bits 3:2, RX endpoint type */
+#define EPCTRL_RXD BIT(1) /* RX endpoint data sink */
+#define EPCTRL_RXS BIT(0) /* RX endpoint STALL */
+} __attribute__ ((packed));
+
+#endif /* __LANGWELL_UDC_H */
+
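As a rough sketch only (not taken from this patch; 'base' is assumed to be the ioremapped register BAR of the controller): the packed layout above can be read through readl() on the corresponding struct members.

static bool langwell_udc_is_running(void __iomem *base)
{
	struct langwell_op_regs __iomem *op = base + OP_REG_OFFSET;

	/* CMD_RUNSTOP stays set while the controller is running */
	return readl(&op->usbcmd) & CMD_RUNSTOP;
}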
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index d6aad0ea603..d4375566926 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -7,6 +7,9 @@
* key configuration differences between boards.
*/
+#ifndef __LINUX_USB_MUSB_H
+#define __LINUX_USB_MUSB_H
+
/* The USB role is defined by the connector used on the board, so long as
* standards are being followed. (Developer boards sometimes won't.)
*/
@@ -101,3 +104,5 @@ extern int __init tusb6010_setup_interface(
extern int tusb6010_platform_retime(unsigned is_refclk);
#endif /* OMAP2 */
+
+#endif /* __LINUX_USB_MUSB_H */
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 94df4fe6c6c..2443c0e7a80 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -81,11 +81,16 @@ struct otg_transceiver {
/* for board-specific init logic */
extern int otg_set_transceiver(struct otg_transceiver *);
+/* sometimes transceivers are accessed only through e.g. ULPI */
+extern void usb_nop_xceiv_register(void);
+extern void usb_nop_xceiv_unregister(void);
+
/* for usb host and peripheral controller drivers */
extern struct otg_transceiver *otg_get_transceiver(void);
extern void otg_put_transceiver(struct otg_transceiver *);
+/* Context: can sleep */
static inline int
otg_start_hnp(struct otg_transceiver *otg)
{
@@ -102,6 +107,8 @@ otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
/* for usb peripheral controller drivers */
+
+/* Context: can sleep */
static inline int
otg_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *periph)
{
@@ -114,6 +121,7 @@ otg_set_power(struct otg_transceiver *otg, unsigned mA)
return otg->set_power(otg, mA);
}
+/* Context: can sleep */
static inline int
otg_set_suspend(struct otg_transceiver *otg, int suspend)
{
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 7f6c603db65..2526f3bbd27 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -16,4 +16,7 @@
/* device can't handle Set-Interface requests */
#define USB_QUIRK_NO_SET_INTF 0x00000004
+/* device can't handle its Configuration or Interface strings */
+#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
new file mode 100644
index 00000000000..e9f0384fa20
--- /dev/null
+++ b/include/linux/usb/r8a66597.h
@@ -0,0 +1,44 @@
+/*
+ * R8A66597 driver platform data
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __LINUX_USB_R8A66597_H
+#define __LINUX_USB_R8A66597_H
+
+#define R8A66597_PLATDATA_XTAL_12MHZ 0x01
+#define R8A66597_PLATDATA_XTAL_24MHZ 0x02
+#define R8A66597_PLATDATA_XTAL_48MHZ 0x03
+
+struct r8a66597_platdata {
+ /* This callback can control port power instead of the DVSTCTR register. */
+ void (*port_power)(int port, int power);
+
+ /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */
+ unsigned xtal:2;
+
+ /* set one = 3.3V, set zero = 1.5V */
+ unsigned vif:1;
+
+ /* set one = big endian, set zero = little endian */
+ unsigned endian:1;
+};
+#endif
+
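A hedged board-file sketch (values purely illustrative): platform data for an externally clocked controller.

static struct r8a66597_platdata my_r8a66597_pdata = {
	.xtal   = R8A66597_PLATDATA_XTAL_12MHZ,	/* external 12 MHz crystal */
	.vif    = 1,				/* 3.3 V interface */
	.endian = 0,				/* little endian */
};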
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
index 0a6e6d4b929..37836b937d9 100644
--- a/include/linux/usb/rndis_host.h
+++ b/include/linux/usb/rndis_host.h
@@ -49,48 +49,45 @@ struct rndis_msg_hdr {
*/
#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000)
-
-#define ccpu2 __constant_cpu_to_le32
-
-#define RNDIS_MSG_COMPLETION ccpu2(0x80000000)
+#define RNDIS_MSG_COMPLETION cpu_to_le32(0x80000000)
/* codes for "msg_type" field of rndis messages;
* only the data channel uses packet messages (maybe batched);
* everything else goes on the control channel.
*/
-#define RNDIS_MSG_PACKET ccpu2(0x00000001) /* 1-N packets */
-#define RNDIS_MSG_INIT ccpu2(0x00000002)
+#define RNDIS_MSG_PACKET cpu_to_le32(0x00000001) /* 1-N packets */
+#define RNDIS_MSG_INIT cpu_to_le32(0x00000002)
#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_HALT ccpu2(0x00000003)
-#define RNDIS_MSG_QUERY ccpu2(0x00000004)
+#define RNDIS_MSG_HALT cpu_to_le32(0x00000003)
+#define RNDIS_MSG_QUERY cpu_to_le32(0x00000004)
#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_SET ccpu2(0x00000005)
+#define RNDIS_MSG_SET cpu_to_le32(0x00000005)
#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_RESET ccpu2(0x00000006)
+#define RNDIS_MSG_RESET cpu_to_le32(0x00000006)
#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION)
-#define RNDIS_MSG_INDICATE ccpu2(0x00000007)
-#define RNDIS_MSG_KEEPALIVE ccpu2(0x00000008)
+#define RNDIS_MSG_INDICATE cpu_to_le32(0x00000007)
+#define RNDIS_MSG_KEEPALIVE cpu_to_le32(0x00000008)
#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION)
/* codes for "status" field of completion messages */
-#define RNDIS_STATUS_SUCCESS ccpu2(0x00000000)
-#define RNDIS_STATUS_FAILURE ccpu2(0xc0000001)
-#define RNDIS_STATUS_INVALID_DATA ccpu2(0xc0010015)
-#define RNDIS_STATUS_NOT_SUPPORTED ccpu2(0xc00000bb)
-#define RNDIS_STATUS_MEDIA_CONNECT ccpu2(0x4001000b)
-#define RNDIS_STATUS_MEDIA_DISCONNECT ccpu2(0x4001000c)
+#define RNDIS_STATUS_SUCCESS cpu_to_le32(0x00000000)
+#define RNDIS_STATUS_FAILURE cpu_to_le32(0xc0000001)
+#define RNDIS_STATUS_INVALID_DATA cpu_to_le32(0xc0010015)
+#define RNDIS_STATUS_NOT_SUPPORTED cpu_to_le32(0xc00000bb)
+#define RNDIS_STATUS_MEDIA_CONNECT cpu_to_le32(0x4001000b)
+#define RNDIS_STATUS_MEDIA_DISCONNECT cpu_to_le32(0x4001000c)
/* codes for OID_GEN_PHYSICAL_MEDIUM */
-#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED ccpu2(0x00000000)
-#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN ccpu2(0x00000001)
-#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM ccpu2(0x00000002)
-#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE ccpu2(0x00000003)
-#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE ccpu2(0x00000004)
-#define RNDIS_PHYSICAL_MEDIUM_DSL ccpu2(0x00000005)
-#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL ccpu2(0x00000006)
-#define RNDIS_PHYSICAL_MEDIUM_1394 ccpu2(0x00000007)
-#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN ccpu2(0x00000008)
-#define RNDIS_PHYSICAL_MEDIUM_MAX ccpu2(0x00000009)
+#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED cpu_to_le32(0x00000000)
+#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN cpu_to_le32(0x00000001)
+#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM cpu_to_le32(0x00000002)
+#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE cpu_to_le32(0x00000003)
+#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE cpu_to_le32(0x00000004)
+#define RNDIS_PHYSICAL_MEDIUM_DSL cpu_to_le32(0x00000005)
+#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL cpu_to_le32(0x00000006)
+#define RNDIS_PHYSICAL_MEDIUM_1394 cpu_to_le32(0x00000007)
+#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN cpu_to_le32(0x00000008)
+#define RNDIS_PHYSICAL_MEDIUM_MAX cpu_to_le32(0x00000009)
struct rndis_data_hdr {
__le32 msg_type; /* RNDIS_MSG_PACKET */
@@ -228,24 +225,24 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */
* there are gobs more that may optionally be supported. We'll avoid as much
* of that mess as possible.
*/
-#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101)
-#define OID_GEN_MAXIMUM_FRAME_SIZE ccpu2(0x00010106)
-#define OID_GEN_CURRENT_PACKET_FILTER ccpu2(0x0001010e)
-#define OID_GEN_PHYSICAL_MEDIUM ccpu2(0x00010202)
+#define OID_802_3_PERMANENT_ADDRESS cpu_to_le32(0x01010101)
+#define OID_GEN_MAXIMUM_FRAME_SIZE cpu_to_le32(0x00010106)
+#define OID_GEN_CURRENT_PACKET_FILTER cpu_to_le32(0x0001010e)
+#define OID_GEN_PHYSICAL_MEDIUM cpu_to_le32(0x00010202)
/* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */
-#define RNDIS_PACKET_TYPE_DIRECTED ccpu2(0x00000001)
-#define RNDIS_PACKET_TYPE_MULTICAST ccpu2(0x00000002)
-#define RNDIS_PACKET_TYPE_ALL_MULTICAST ccpu2(0x00000004)
-#define RNDIS_PACKET_TYPE_BROADCAST ccpu2(0x00000008)
-#define RNDIS_PACKET_TYPE_SOURCE_ROUTING ccpu2(0x00000010)
-#define RNDIS_PACKET_TYPE_PROMISCUOUS ccpu2(0x00000020)
-#define RNDIS_PACKET_TYPE_SMT ccpu2(0x00000040)
-#define RNDIS_PACKET_TYPE_ALL_LOCAL ccpu2(0x00000080)
-#define RNDIS_PACKET_TYPE_GROUP ccpu2(0x00001000)
-#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL ccpu2(0x00002000)
-#define RNDIS_PACKET_TYPE_FUNCTIONAL ccpu2(0x00004000)
-#define RNDIS_PACKET_TYPE_MAC_FRAME ccpu2(0x00008000)
+#define RNDIS_PACKET_TYPE_DIRECTED cpu_to_le32(0x00000001)
+#define RNDIS_PACKET_TYPE_MULTICAST cpu_to_le32(0x00000002)
+#define RNDIS_PACKET_TYPE_ALL_MULTICAST cpu_to_le32(0x00000004)
+#define RNDIS_PACKET_TYPE_BROADCAST cpu_to_le32(0x00000008)
+#define RNDIS_PACKET_TYPE_SOURCE_ROUTING cpu_to_le32(0x00000010)
+#define RNDIS_PACKET_TYPE_PROMISCUOUS cpu_to_le32(0x00000020)
+#define RNDIS_PACKET_TYPE_SMT cpu_to_le32(0x00000040)
+#define RNDIS_PACKET_TYPE_ALL_LOCAL cpu_to_le32(0x00000080)
+#define RNDIS_PACKET_TYPE_GROUP cpu_to_le32(0x00001000)
+#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL cpu_to_le32(0x00002000)
+#define RNDIS_PACKET_TYPE_FUNCTIONAL cpu_to_le32(0x00004000)
+#define RNDIS_PACKET_TYPE_MAC_FRAME cpu_to_le32(0x00008000)
/* default filter used with RNDIS devices */
#define RNDIS_DEFAULT_FILTER ( \
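The point of switching these constants from ccpu2 to cpu_to_le32 is that they remain __le32-typed compile-time values, so received headers can be compared directly without per-packet byte swapping. A minimal sketch (function name hypothetical):

static bool rndis_is_data_packet(const struct rndis_data_hdr *hdr)
{
	/* both sides of the comparison are little-endian, no swab needed */
	return hdr->msg_type == RNDIS_MSG_PACKET;
}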
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 0b8617a9176..44801d26a37 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -15,6 +15,7 @@
#include <linux/kref.h>
#include <linux/mutex.h>
+#include <linux/sysrq.h>
#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
@@ -26,10 +27,17 @@
/* parity check flag */
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+enum port_dev_state {
+ PORT_UNREGISTERED,
+ PORT_REGISTERING,
+ PORT_REGISTERED,
+ PORT_UNREGISTERING,
+};
+
/**
* usb_serial_port: structure for the specific ports of a device.
* @serial: pointer back to the struct usb_serial owner of this port.
- * @tty: pointer to the corresponding tty for this port.
+ * @port: pointer to the corresponding tty_port for this port.
* @lock: spinlock to grab when updating portions of this structure.
* @mutex: mutex used to synchronize serial_open() and serial_close()
* access for this port.
@@ -44,19 +52,22 @@
* @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
* for this port.
* @bulk_in_buffer: pointer to the bulk in buffer for this port.
+ * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
* @read_urb: pointer to the bulk in struct urb for this port.
* @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
* port.
* @bulk_out_buffer: pointer to the bulk out buffer for this port.
* @bulk_out_size: the size of the bulk_out_buffer, in bytes.
* @write_urb: pointer to the bulk out struct urb for this port.
+ * @write_urb_busy: nonzero while the port's write urb is in flight
* @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
* port.
* @write_wait: a wait_queue_head_t used by the port.
* @work: work queue entry for the line discipline waking up.
- * @open_count: number of times this port has been opened.
* @throttled: nonzero if the read urb is inactive to throttle the device
* @throttle_req: nonzero if the tty wants to throttle us
+ * @console: attached usb serial console
+ * @dev: pointer to the serial device
*
* This structure is used by the usb-serial core and drivers for the specific
* ports of a device.
@@ -88,12 +99,17 @@ struct usb_serial_port {
int write_urb_busy;
__u8 bulk_out_endpointAddress;
+ int tx_bytes_flight;
+ int urbs_in_flight;
+
wait_queue_head_t write_wait;
struct work_struct work;
char throttled;
char throttle_req;
char console;
+ unsigned long sysrq; /* sysrq timeout */
struct device dev;
+ enum port_dev_state dev_state;
};
#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
@@ -130,7 +146,8 @@ struct usb_serial {
struct usb_device *dev;
struct usb_serial_driver *type;
struct usb_interface *interface;
- unsigned char disconnected;
+ unsigned char disconnected:1;
+ unsigned char suspending:1;
unsigned char minor;
unsigned char num_ports;
unsigned char num_port_pointers;
@@ -177,8 +194,10 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
* This will be called when the struct usb_serial structure is fully
* set up. Do any local initialization of the device, or any private
* memory structure allocation at this point in time.
- * @shutdown: pointer to the driver's shutdown function. This will be
- * called when the device is removed from the system.
+ * @disconnect: pointer to the driver's disconnect function. This will be
+ * called when the device is unplugged or unbound from the driver.
+ * @release: pointer to the driver's release function. This will be called
+ * when the usb_serial data structure is about to be destroyed.
* @usb_driver: pointer to the struct usb_driver that controls this
* device. This is necessary to allow dynamic ids to be added to
* the driver from sysfs.
@@ -203,12 +222,14 @@ struct usb_serial_driver {
struct device_driver driver;
struct usb_driver *usb_driver;
struct usb_dynids dynids;
+ int max_in_flight_urbs;
int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
int (*attach)(struct usb_serial *serial);
int (*calc_num_ports) (struct usb_serial *serial);
- void (*shutdown)(struct usb_serial *serial);
+ void (*disconnect)(struct usb_serial *serial);
+ void (*release)(struct usb_serial *serial);
int (*port_probe)(struct usb_serial_port *port);
int (*port_remove)(struct usb_serial_port *port);
@@ -220,8 +241,7 @@ struct usb_serial_driver {
/* Called by console with tty = NULL and by tty */
int (*open)(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
- void (*close)(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+ void (*close)(struct usb_serial_port *port);
int (*write)(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
/* Called only by the tty layer */
@@ -237,6 +257,10 @@ struct usb_serial_driver {
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
+ /* Called by the tty layer for port level work. There may or may not
+ be an attached tty at this point */
+ void (*dtr_rts)(struct usb_serial_port *port, int on);
+ int (*carrier_raised)(struct usb_serial_port *port);
/* USB events */
void (*read_int_callback)(struct urb *urb);
void (*write_int_callback)(struct urb *urb);
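To illustrate the reshaped callbacks only (a sketch, not a complete driver; names are hypothetical and fields such as .id_table and .usb_driver are omitted): close() now takes just the port, and shutdown() is replaced by the disconnect()/release() pair.

static void my_close(struct usb_serial_port *port)
{
	/* stop I/O on this port; no tty or file arguments any more */
}

static void my_disconnect(struct usb_serial *serial)
{
	/* device unplugged or driver unbound: kill outstanding urbs */
}

static void my_release(struct usb_serial *serial)
{
	/* last callback before the usb_serial structure is destroyed */
}

static struct usb_serial_driver my_serial_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = "my_serial",
	},
	.close      = my_close,
	.disconnect = my_disconnect,
	.release    = my_release,
};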
@@ -279,8 +303,7 @@ extern int usb_serial_generic_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
extern int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
-extern void usb_serial_generic_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+extern void usb_serial_generic_close(struct usb_serial_port *port);
extern int usb_serial_generic_resume(struct usb_serial *serial);
extern int usb_serial_generic_write_room(struct tty_struct *tty);
extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
@@ -288,9 +311,16 @@ extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
extern void usb_serial_generic_throttle(struct tty_struct *tty);
extern void usb_serial_generic_unthrottle(struct tty_struct *tty);
-extern void usb_serial_generic_shutdown(struct usb_serial *serial);
+extern void usb_serial_generic_disconnect(struct usb_serial *serial);
+extern void usb_serial_generic_release(struct usb_serial *serial);
extern int usb_serial_generic_register(int debug);
extern void usb_serial_generic_deregister(void);
+extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
+ gfp_t mem_flags);
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
+ unsigned int ch);
+extern int usb_serial_handle_break(struct usb_serial_port *port);
+
extern int usb_serial_bus_register(struct usb_serial_driver *device);
extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 7d382224307..5d44059f6d6 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -176,8 +176,14 @@ struct skb_data { /* skb->cb is one of these */
size_t length;
};
+extern int usbnet_open (struct net_device *net);
+extern int usbnet_stop (struct net_device *net);
+extern int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net);
+extern void usbnet_tx_timeout (struct net_device *net);
+extern int usbnet_change_mtu (struct net_device *net, int new_mtu);
extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *);
+extern int usbnet_get_ethernet_addr(struct usbnet *, int);
extern void usbnet_defer_kevent (struct usbnet *, int);
extern void usbnet_skb_return (struct usbnet *, struct sk_buff *);
extern void usbnet_unlink_rx_urbs(struct usbnet *);
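With these symbols exported, a usbnet minidriver can point its net_device_ops straight at the shared implementations; a hedged sketch assuming the 2.6.3x net_device_ops layout:

static const struct net_device_ops my_usbnet_netdev_ops = {
	.ndo_open       = usbnet_open,
	.ndo_stop       = usbnet_stop,
	.ndo_start_xmit = usbnet_start_xmit,
	.ndo_tx_timeout = usbnet_tx_timeout,
	.ndo_change_mtu = usbnet_change_mtu,
};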
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 5f401b644ed..429c631d2aa 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -80,8 +80,7 @@ struct wusb_ckhdid {
u8 data[16];
} __attribute__((packed));
-const static
-struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 1eea1ab68dc..3d15fb9bc11 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -96,39 +96,26 @@ enum { US_DO_ALL_FLAGS };
#define US_PR_CBI 0x00 /* Control/Bulk/Interrupt */
#define US_PR_CB 0x01 /* Control/Bulk w/o interrupt */
#define US_PR_BULK 0x50 /* bulk only */
-#ifdef CONFIG_USB_STORAGE_USBAT
+
#define US_PR_USBAT 0x80 /* SCM-ATAPI bridge */
-#endif
-#ifdef CONFIG_USB_STORAGE_SDDR09
#define US_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */
-#endif
-#ifdef CONFIG_USB_STORAGE_SDDR55
#define US_PR_SDDR55 0x82 /* SDDR-55 (made up) */
-#endif
#define US_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */
-#ifdef CONFIG_USB_STORAGE_FREECOM
#define US_PR_FREECOM 0xf1 /* Freecom */
-#endif
-#ifdef CONFIG_USB_STORAGE_DATAFAB
#define US_PR_DATAFAB 0xf2 /* Datafab chipsets */
-#endif
-#ifdef CONFIG_USB_STORAGE_JUMPSHOT
#define US_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */
-#endif
-#ifdef CONFIG_USB_STORAGE_ALAUDA
#define US_PR_ALAUDA 0xf4 /* Alauda chipsets */
-#endif
-#ifdef CONFIG_USB_STORAGE_KARMA
#define US_PR_KARMA 0xf5 /* Rio Karma */
-#endif
#define US_PR_DEVICE 0xff /* Use device's value */
/*
*/
+extern int usb_usual_ignore_device(struct usb_interface *intf);
+extern struct usb_device_id usb_storage_usb_ids[];
+
#ifdef CONFIG_USB_LIBUSUAL
-extern struct usb_device_id storage_usb_ids[];
extern void usb_usual_set_present(int type);
extern void usb_usual_clear_present(int type);
extern int usb_usual_check_type(const struct usb_device_id *, int type);
diff --git a/include/linux/utime.h b/include/linux/utime.h
index 640be6a1959..5cdf673afbd 100644
--- a/include/linux/utime.h
+++ b/include/linux/utime.h
@@ -4,8 +4,8 @@
#include <linux/types.h>
struct utimbuf {
- time_t actime;
- time_t modtime;
+ __kernel_time_t actime;
+ __kernel_time_t modtime;
};
#endif
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 11232676bff..3656b300de3 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -22,12 +22,12 @@ struct old_utsname {
};
struct new_utsname {
- char sysname[65];
- char nodename[65];
- char release[65];
- char version[65];
- char machine[65];
- char domainname[65];
+ char sysname[__NEW_UTS_LEN + 1];
+ char nodename[__NEW_UTS_LEN + 1];
+ char release[__NEW_UTS_LEN + 1];
+ char version[__NEW_UTS_LEN + 1];
+ char machine[__NEW_UTS_LEN + 1];
+ char domainname[__NEW_UTS_LEN + 1];
};
#ifdef __KERNEL__
diff --git a/include/linux/video_decoder.h b/include/linux/video_decoder.h
deleted file mode 100644
index e26c0c86a6e..00000000000
--- a/include/linux/video_decoder.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef _LINUX_VIDEO_DECODER_H
-#define _LINUX_VIDEO_DECODER_H
-
-#include <linux/types.h>
-
-#define HAVE_VIDEO_DECODER 1
-
-struct video_decoder_capability { /* this name is too long */
- __u32 flags;
-#define VIDEO_DECODER_PAL 1 /* can decode PAL signal */
-#define VIDEO_DECODER_NTSC 2 /* can decode NTSC */
-#define VIDEO_DECODER_SECAM 4 /* can decode SECAM */
-#define VIDEO_DECODER_AUTO 8 /* can autosense norm */
-#define VIDEO_DECODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */
- int inputs; /* number of inputs */
- int outputs; /* number of outputs */
-};
-
-/*
-DECODER_GET_STATUS returns the following flags. The only one you need is
-DECODER_STATUS_GOOD, the others are just nice things to know.
-*/
-#define DECODER_STATUS_GOOD 1 /* receiving acceptable input */
-#define DECODER_STATUS_COLOR 2 /* receiving color information */
-#define DECODER_STATUS_PAL 4 /* auto detected */
-#define DECODER_STATUS_NTSC 8 /* auto detected */
-#define DECODER_STATUS_SECAM 16 /* auto detected */
-
-struct video_decoder_init {
- unsigned char len;
- const unsigned char *data;
-};
-
-#define DECODER_GET_CAPABILITIES _IOR('d', 1, struct video_decoder_capability)
-#define DECODER_GET_STATUS _IOR('d', 2, int)
-#define DECODER_SET_NORM _IOW('d', 3, int)
-#define DECODER_SET_INPUT _IOW('d', 4, int) /* 0 <= input < #inputs */
-#define DECODER_SET_OUTPUT _IOW('d', 5, int) /* 0 <= output < #outputs */
-#define DECODER_ENABLE_OUTPUT _IOW('d', 6, int) /* boolean output enable control */
-#define DECODER_SET_PICTURE _IOW('d', 7, struct video_picture)
-#define DECODER_SET_GPIO _IOW('d', 8, int) /* switch general purpose pin */
-#define DECODER_INIT _IOW('d', 9, struct video_decoder_init) /* init internal registers at once */
-#define DECODER_SET_VBI_BYPASS _IOW('d', 10, int) /* switch vbi bypass */
-
-#define DECODER_DUMP _IO('d', 192) /* debug hook */
-
-
-#endif
diff --git a/include/linux/video_encoder.h b/include/linux/video_encoder.h
deleted file mode 100644
index b7b6423bbb8..00000000000
--- a/include/linux/video_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _LINUX_VIDEO_ENCODER_H
-#define _LINUX_VIDEO_ENCODER_H
-
-#include <linux/types.h>
-
-struct video_encoder_capability { /* this name is too long */
- __u32 flags;
-#define VIDEO_ENCODER_PAL 1 /* can encode PAL signal */
-#define VIDEO_ENCODER_NTSC 2 /* can encode NTSC */
-#define VIDEO_ENCODER_SECAM 4 /* can encode SECAM */
-#define VIDEO_ENCODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */
- int inputs; /* number of inputs */
- int outputs; /* number of outputs */
-};
-
-#define ENCODER_GET_CAPABILITIES _IOR('e', 1, struct video_encoder_capability)
-#define ENCODER_SET_NORM _IOW('e', 2, int)
-#define ENCODER_SET_INPUT _IOW('e', 3, int) /* 0 <= input < #inputs */
-#define ENCODER_SET_OUTPUT _IOW('e', 4, int) /* 0 <= output < #outputs */
-#define ENCODER_ENABLE_OUTPUT _IOW('e', 5, int) /* boolean output enable control */
-
-
-#endif
diff --git a/include/linux/videodev.h b/include/linux/videodev.h
index 837f392fbe9..b19eab14097 100644
--- a/include/linux/videodev.h
+++ b/include/linux/videodev.h
@@ -16,6 +16,23 @@
#include <linux/ioctl.h>
#include <linux/videodev2.h>
+#if defined(__MIN_V4L1) && defined (__KERNEL__)
+
+/*
+ * Used by those V4L2 core functions that need minimal V4L1 support,
+ * in order to allow the V4L1 compatibility code to compile.
+ */
+
+struct video_mbuf
+{
+ int size; /* Total memory to map */
+ int frames; /* Frames */
+ int offsets[VIDEO_MAX_FRAME];
+};
+
+#define VIDIOCGMBUF _IOR('v',20, struct video_mbuf) /* Memory map buffer info */
+
+#else
#if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__)
#define VID_TYPE_CAPTURE 1 /* Can capture */
@@ -312,6 +329,7 @@ struct video_code
#define VID_PLAY_END_MARK 14
#endif /* CONFIG_VIDEO_V4L1_COMPAT */
+#endif /* __MIN_V4L1 */
#endif /* __LINUX_VIDEODEV_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 5571dbe1c0a..f24eceecc5a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -344,8 +344,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
+#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
+#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
-#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
+#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
+#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
/*
* F O R M A T E N U M E R A T I O N
@@ -735,6 +738,11 @@ struct v4l2_input {
#define V4L2_IN_ST_NO_SIGNAL 0x00000002
#define V4L2_IN_ST_NO_COLOR 0x00000004
+/* field 'status' - sensor orientation */
+/* If the sensor is mounted upside down, set both bits */
+#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */
+#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */
+
/* field 'status' - analog */
#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */
#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */
@@ -829,6 +837,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_UPDATE 0x0008
#define V4L2_CTRL_FLAG_INACTIVE 0x0010
#define V4L2_CTRL_FLAG_SLIDER 0x0020
+#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
/* Query flag, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -879,8 +888,15 @@ enum v4l2_power_line_frequency {
#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
+#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
+enum v4l2_colorfx {
+ V4L2_COLORFX_NONE = 0,
+ V4L2_COLORFX_BW = 1,
+ V4L2_COLORFX_SEPIA = 2,
+};
+
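A short userspace sketch (illustrative only, assuming 'fd' is an opened V4L2 device): selecting the new sepia effect through the standard control ioctl.

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_sepia(int fd)
{
	struct v4l2_control ctrl = {
		.id    = V4L2_CID_COLORFX,
		.value = V4L2_COLORFX_SEPIA,
	};

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}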
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+31)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+32)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1339,6 +1355,53 @@ struct v4l2_sliced_vbi_data {
};
/*
+ * Sliced VBI data inserted into MPEG Streams
+ */
+
+/*
+ * V4L2_MPEG_STREAM_VBI_FMT_IVTV:
+ *
+ * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an
+ * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI
+ * data
+ *
+ * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header
+ * definitions are not included here. See the MPEG-2 specifications for details
+ * on these headers.
+ */
+
+/* Line type IDs */
+#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1)
+#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4)
+#define V4L2_MPEG_VBI_IVTV_WSS_625 (5)
+#define V4L2_MPEG_VBI_IVTV_VPS (7)
+
+struct v4l2_mpeg_vbi_itv0_line {
+ __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */
+ __u8 data[42]; /* Sliced VBI data for the line */
+} __attribute__ ((packed));
+
+struct v4l2_mpeg_vbi_itv0 {
+ __le32 linemask[2]; /* Bitmasks of VBI service lines present */
+ struct v4l2_mpeg_vbi_itv0_line line[35];
+} __attribute__ ((packed));
+
+struct v4l2_mpeg_vbi_ITV0 {
+ struct v4l2_mpeg_vbi_itv0_line line[36];
+} __attribute__ ((packed));
+
+#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0"
+#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0"
+
+struct v4l2_mpeg_vbi_fmt_ivtv {
+ __u8 magic[4];
+ union {
+ struct v4l2_mpeg_vbi_itv0 itv0;
+ struct v4l2_mpeg_vbi_ITV0 ITV0;
+ };
+} __attribute__ ((packed));
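As a hedged sketch of how a consumer might tell the two payload layouts apart (function name hypothetical; memcmp from <linux/string.h> assumed):

static bool vbi_payload_is_itv0(const struct v4l2_mpeg_vbi_fmt_ivtv *vbi)
{
	/* "itv0" carries the linemask; "ITV0" is the fixed 36-line form */
	return memcmp(vbi->magic, V4L2_MPEG_VBI_IVTV_MAGIC0,
		      sizeof(vbi->magic)) == 0;
}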
+
+/*
* A G G R E G A T E S T R U C T U R E S
*/
@@ -1403,14 +1466,6 @@ struct v4l2_dbg_chip_ident {
__u32 revision; /* chip revision, chip specific */
} __attribute__ ((packed));
-/* VIDIOC_G_CHIP_IDENT_OLD: Deprecated, do not use */
-struct v4l2_chip_ident_old {
- __u32 match_type; /* Match type */
- __u32 match_chip; /* Match this chip, meaning determined by match_type */
- __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */
- __u32 revision; /* chip revision, chip specific */
-};
-
/*
* I O C T L C O D E S F O R V I D E O D E V I C E S
*
@@ -1488,8 +1543,6 @@ struct v4l2_chip_ident_old {
/* Experimental, meant for debugging, testing and internal use.
Never use this ioctl in applications! */
#define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident)
-/* This is deprecated and will go away in 2.6.30 */
-#define VIDIOC_G_CHIP_IDENT_OLD _IOWR('V', 81, struct v4l2_chip_ident_old)
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 06005fa9e98..4fca4f5440b 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,14 +10,17 @@
/**
* virtqueue - a queue to register buffers for sending or receiving.
+ * @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
+ * @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
* @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
-struct virtqueue
-{
+struct virtqueue {
+ struct list_head list;
void (*callback)(struct virtqueue *vq);
+ const char *name;
struct virtio_device *vdev;
struct virtqueue_ops *vq_ops;
void *priv;
@@ -76,15 +79,16 @@ struct virtqueue_ops {
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
+ * @vqs: the list of virtqueues for this device.
* @features: the features supported by both driver and device.
* @priv: private pointer for the driver's use.
*/
-struct virtio_device
-{
+struct virtio_device {
int index;
struct device dev;
struct virtio_device_id id;
struct virtio_config_ops *config;
+ struct list_head vqs;
/* Note that this is a Linux set_bit-style bitmap. */
unsigned long features[1];
void *priv;
@@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev);
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this device.
* @feature_table_size: number of entries in the feature table array.
- * @probe: the function to call when a device is found. Returns a token for
- * remove, or PTR_ERR().
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function when a device is removed.
* @config_changed: optional function to call when the device configuration
* changes; may be called in interrupt context.
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d29869..be7d255fc7c 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,10 @@
#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */
+
+#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
struct virtio_blk_config
{
@@ -32,6 +36,7 @@ struct virtio_blk_config
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
__u32 blk_size;
+ __u8 identify[VIRTIO_BLK_ID_BYTES];
} __attribute__((packed));
/* These two define direction. */
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr
__u64 sector;
};
+struct virtio_scsi_inhdr {
+ __u32 errors;
+ __u32 data_len;
+ __u32 sense_len;
+ __u32 residual;
+};
+
/* And this is the final byte of the write scatter-gather list. */
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index bf8ec283b23..99f514575f6 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -29,6 +29,7 @@
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
#ifdef __KERNEL__
+#include <linux/err.h>
#include <linux/virtio.h>
/**
@@ -49,15 +50,26 @@
* @set_status: write the status byte
* vdev: the virtio_device
* status: the new status byte
+ * @request_vqs: request the specified number of virtqueues
+ * vdev: the virtio_device
+ * max_vqs: the max number of virtqueues we want
+ * If supplied, must call before any virtqueues are instantiated.
+ * To modify the max number of virtqueues after request_vqs has been
+ * called, call free_vqs and then request_vqs with a new value.
+ * @free_vqs: cleanup resources allocated by request_vqs
+ * vdev: the virtio_device
+ * If supplied, must call after all virtqueues have been deleted.
* @reset: reset the device
* vdev: the virtio device
* After this, status and feature negotiation must be done again
- * @find_vq: find a virtqueue and instantiate it.
+ * @find_vqs: find virtqueues and instantiate them.
* vdev: the virtio_device
- * index: the 0-based virtqueue number in case there's more than one.
- * callback: the virqtueue callback
- * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT).
- * @del_vq: free a virtqueue found by find_vq().
+ * nvqs: the number of virtqueues to find
+ * vqs: on success, includes new virtqueues
+ * callbacks: array of callbacks, for each virtqueue
+ * names: array of virtqueue names (mainly for debugging)
+ * Returns 0 on success or error status
+ * @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 32 feature bits (all we currently need).
@@ -66,6 +78,7 @@
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
*/
+typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops
{
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -75,10 +88,11 @@ struct virtio_config_ops
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
- struct virtqueue *(*find_vq)(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *));
- void (*del_vq)(struct virtqueue *vq);
+ int (*find_vqs)(struct virtio_device *, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[]);
+ void (*del_vqs)(struct virtio_device *);
u32 (*get_features)(struct virtio_device *vdev);
void (*finalize_features)(struct virtio_device *vdev);
};
@@ -99,7 +113,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
if (__builtin_constant_p(fbit))
BUILD_BUG_ON(fbit >= 32);
- virtio_check_driver_offered_feature(vdev, fbit);
+ if (fbit < VIRTIO_TRANSPORT_F_START)
+ virtio_check_driver_offered_feature(vdev, fbit);
+
return test_bit(fbit, vdev->features);
}
@@ -126,5 +142,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev,
vdev->config->get(vdev, offset, buf, len);
return 0;
}
+
+static inline
+struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
+ vq_callback_t *c, const char *n)
+{
+ vq_callback_t *callbacks[] = { c };
+ const char *names[] = { n };
+ struct virtqueue *vq;
+ int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);
+ if (err < 0)
+ return ERR_PTR(err);
+ return vq;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_CONFIG_H */
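
To make the new find_vqs() contract concrete, here is a sketch of a driver setting up two queues in one call; the callback and function names are hypothetical and only the signatures shown in this patch are assumed.

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Hypothetical per-queue callbacks for a two-queue device. */
static void example_rx_done(struct virtqueue *vq) { /* ... */ }
static void example_tx_done(struct virtqueue *vq) { /* ... */ }

static int example_setup_vqs(struct virtio_device *vdev,
			     struct virtqueue **rx, struct virtqueue **tx)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
	const char *names[] = { "rx", "tx" };
	int err;

	/* One call replaces the old per-queue find_vq() loop. */
	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;

	*rx = vqs[0];
	*tx = vqs[1];
	return 0;
}

Single-queue drivers can instead use the virtio_find_single_vq() helper added above, which wraps exactly this pattern.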
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 3efa86c3ecb..cec79adbe3e 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -4,6 +4,7 @@
* compatible drivers/servers. */
#include <linux/types.h>
#include <linux/virtio_config.h>
+#include <linux/if_ether.h>
/* The ID for virtio_net */
#define VIRTIO_ID_NET 1
@@ -22,11 +23,19 @@
#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */
+#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */
+#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
+#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
+
+#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
struct virtio_net_config
{
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
__u8 mac[6];
+ /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
+ __u16 status;
} __attribute__((packed));
/* This is the first element of the scatter-gather list. If you don't
@@ -54,4 +63,67 @@ struct virtio_net_hdr_mrg_rxbuf {
__u16 num_buffers; /* Number of merged rx buffers */
};
+/*
+ * Control virtqueue data structures
+ *
+ * The control virtqueue expects a header in the first sg entry
+ * and an ack/status response in the last entry. Data for the
+ * command goes in between.
+ */
+struct virtio_net_ctrl_hdr {
+ __u8 class;
+ __u8 cmd;
+} __attribute__((packed));
+
+typedef __u8 virtio_net_ctrl_ack;
+
+#define VIRTIO_NET_OK 0
+#define VIRTIO_NET_ERR 1
+
+/*
+ * Control the RX mode, i.e. promiscuous and allmulti. PROMISC and
+ * ALLMULTI commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable. These commands
+ * are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ */
+#define VIRTIO_NET_CTRL_RX 0
+ #define VIRTIO_NET_CTRL_RX_PROMISC 0
+ #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+
+/*
+ * Control the MAC filter table.
+ *
+ * The MAC filter table is managed by the hypervisor; the guest should
+ * assume the size is infinite. Filtering should be considered
+ * non-perfect, i.e. depending on hypervisor resources, the guest may
+ * receive packets from sources not specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires
+ * two out scatterlists. Each contains a 4 byte count of entries followed
+ * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
+ * first sg list contains unicast addresses, the second is for multicast.
+ * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
+ * is available.
+ */
+struct virtio_net_ctrl_mac {
+ __u32 entries;
+ __u8 macs[][ETH_ALEN];
+} __attribute__((packed));
+
+#define VIRTIO_NET_CTRL_MAC 1
+ #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+
+/*
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface.
+ * VLAN IDs not added may be filtered by the hypervisor. Del is the
+ * opposite of add. Both commands expect an out entry containing a 2
+ * byte VLAN ID. VLAN filtering is available with the
+ * VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN 2
+ #define VIRTIO_NET_CTRL_VLAN_ADD 0
+ #define VIRTIO_NET_CTRL_VLAN_DEL 1
+
#endif /* _LINUX_VIRTIO_NET_H */
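
A sketch of the control-virtqueue layout documented above (header first, command data in between, ack last), here toggling promiscuous mode. It assumes VIRTIO_NET_F_CTRL_VQ/CTRL_RX were negotiated and the vq_ops->add_buf()/kick() interface of this kernel generation; the function name is illustrative.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>

static int example_set_promisc(struct virtqueue *cvq, bool on)
{
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status = ~0;
	__u8 state = on ? 1 : 0;
	struct scatterlist sg[3];

	hdr.class = VIRTIO_NET_CTRL_RX;
	hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;

	/* Header first, command data in the middle, ack last. */
	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &hdr, sizeof(hdr));
	sg_set_buf(&sg[1], &state, sizeof(state));
	sg_set_buf(&sg[2], &status, sizeof(status));

	if (cvq->vq_ops->add_buf(cvq, sg, 2, 1, &status) < 0)
		return -ENOSPC;
	cvq->vq_ops->kick(cvq);

	/* A real driver would wait for the buffer to come back via
	 * get_buf() before trusting 'status' or letting these
	 * on-stack buffers go out of scope. */
	return status == VIRTIO_NET_OK ? 0 : -EIO;
}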
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index cd0fd5d181a..9a3d7c48c62 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -47,9 +47,17 @@
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* MSI-X registers: only enabled if MSI-X is enabled. */
+/* A 16-bit vector for configuration changes. */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* A 16-bit vector for selected queue notifications. */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+/* Vector value used to disable MSI for queue */
+#define VIRTIO_MSI_NO_VECTOR 0xffff
+
/* The remaining space is defined by each driver as the per-driver
* configuration space */
-#define VIRTIO_PCI_CONFIG 20
+#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
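
Because the per-driver config window now starts at 24 when MSI-X is enabled, config accessors must take the device rather than a fixed offset. The struct below only mirrors what the virtio_pci driver keeps privately (an ioremapped BAR plus the msix_enabled flag consumed by VIRTIO_PCI_CONFIG()); it is a sketch, not the driver's actual state.

#include <linux/io.h>
#include <linux/virtio_pci.h>

struct example_virtio_pci_device {
	void __iomem *ioaddr;
	int msix_enabled;
};

static u8 example_config_readb(struct example_virtio_pci_device *dev,
			       unsigned offset)
{
	/* Device-specific config moved from fixed offset 20 to
	 * 20-or-24, depending on whether the MSI-X vector registers
	 * are present. */
	return ioread8(dev->ioaddr + VIRTIO_PCI_CONFIG(dev) + offset);
}

static u16 example_get_config_msix_vector(struct example_virtio_pci_device *dev)
{
	/* Reads back VIRTIO_MSI_NO_VECTOR (0xffff) if the device could
	 * not allocate the requested vector. */
	return ioread16(dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}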
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 71e03722fb5..693e0ec5afa 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -14,6 +14,8 @@
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
@@ -24,6 +26,9 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc
{
@@ -119,7 +124,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *vq),
- void (*callback)(struct virtqueue *vq));
+ void (*callback)(struct virtqueue *vq),
+ const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);
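
For illustration, this is roughly what an indirect descriptor table looks like when VIRTIO_RING_F_INDIRECT_DESC is negotiated: a single ring descriptor carrying VRING_DESC_F_INDIRECT points at an out-of-band array of vring_desc entries. The real implementation belongs to the ring code; this sketch only shows the table layout.

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/virtio_ring.h>

static struct vring_desc *example_build_indirect(struct scatterlist sg[],
						 unsigned int out,
						 unsigned int in)
{
	struct vring_desc *desc;
	unsigned int i, total = out + in;

	desc = kmalloc(total * sizeof(*desc), GFP_ATOMIC);
	if (!desc)
		return NULL;

	for (i = 0; i < total; i++) {
		desc[i].addr = sg_phys(&sg[i]);
		desc[i].len = sg[i].length;
		/* 'in' buffers are device-writable; every entry but the
		 * last chains to the next one. */
		desc[i].flags = (i >= out ? VRING_DESC_F_WRITE : 0) |
				(i + 1 < total ? VRING_DESC_F_NEXT : 0);
		desc[i].next = i + 1;
	}
	return desc;
}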
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
new file mode 100644
index 00000000000..8f6a95882b0
--- /dev/null
+++ b/include/linux/vlynq.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __VLYNQ_H__
+#define __VLYNQ_H__
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define VLYNQ_NUM_IRQS 32
+
+struct vlynq_mapping {
+ u32 size;
+ u32 offset;
+};
+
+enum vlynq_divisor {
+ vlynq_div_auto = 0,
+ vlynq_ldiv1,
+ vlynq_ldiv2,
+ vlynq_ldiv3,
+ vlynq_ldiv4,
+ vlynq_ldiv5,
+ vlynq_ldiv6,
+ vlynq_ldiv7,
+ vlynq_ldiv8,
+ vlynq_rdiv1,
+ vlynq_rdiv2,
+ vlynq_rdiv3,
+ vlynq_rdiv4,
+ vlynq_rdiv5,
+ vlynq_rdiv6,
+ vlynq_rdiv7,
+ vlynq_rdiv8,
+ vlynq_div_external
+};
+
+struct vlynq_device_id {
+ u32 id;
+ enum vlynq_divisor divisor;
+ unsigned long driver_data;
+};
+
+struct vlynq_regs;
+struct vlynq_device {
+ u32 id, dev_id;
+ int local_irq;
+ int remote_irq;
+ enum vlynq_divisor divisor;
+ u32 regs_start, regs_end;
+ u32 mem_start, mem_end;
+ u32 irq_start, irq_end;
+ int irq;
+ int enabled;
+ struct vlynq_regs *local;
+ struct vlynq_regs *remote;
+ struct device dev;
+};
+
+struct vlynq_driver {
+ char *name;
+ struct vlynq_device_id *id_table;
+ int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
+ void (*remove)(struct vlynq_device *dev);
+ struct device_driver driver;
+};
+
+struct plat_vlynq_ops {
+ int (*on)(struct vlynq_device *dev);
+ void (*off)(struct vlynq_device *dev);
+};
+
+static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct vlynq_driver, driver);
+}
+
+static inline struct vlynq_device *to_vlynq_device(struct device *device)
+{
+ return container_of(device, struct vlynq_device, dev);
+}
+
+extern struct bus_type vlynq_bus_type;
+
+extern int __vlynq_register_driver(struct vlynq_driver *driver,
+ struct module *owner);
+
+static inline int vlynq_register_driver(struct vlynq_driver *driver)
+{
+ return __vlynq_register_driver(driver, THIS_MODULE);
+}
+
+static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+static inline u32 vlynq_mem_start(struct vlynq_device *dev)
+{
+ return dev->mem_start;
+}
+
+static inline u32 vlynq_mem_end(struct vlynq_device *dev)
+{
+ return dev->mem_end;
+}
+
+static inline u32 vlynq_mem_len(struct vlynq_device *dev)
+{
+ return dev->mem_end - dev->mem_start + 1;
+}
+
+static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
+{
+ int irq = dev->irq_start + virq;
+ if ((irq < dev->irq_start) || (irq > dev->irq_end))
+ return -EINVAL;
+
+ return irq;
+}
+
+static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
+{
+ if ((irq < dev->irq_start) || (irq > dev->irq_end))
+ return -EINVAL;
+
+ return irq - dev->irq_start;
+}
+
+extern void vlynq_unregister_driver(struct vlynq_driver *driver);
+extern int vlynq_enable_device(struct vlynq_device *dev);
+extern void vlynq_disable_device(struct vlynq_device *dev);
+extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
+ struct vlynq_mapping *mapping);
+extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
+ struct vlynq_mapping *mapping);
+extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
+extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
+
+#endif /* __VLYNQ_H__ */
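
A minimal skeleton of a driver against the new VLYNQ bus API declared above; the device ID, names, and probe body are placeholders.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/vlynq.h>

/* Placeholder ID: match one remote device, let the bus pick the
 * clock divisor. */
static struct vlynq_device_id example_id_table[] = {
	{ .id = 0x00000009, .divisor = vlynq_div_auto },
	{ },
};

static int example_probe(struct vlynq_device *dev,
			 struct vlynq_device_id *id)
{
	int err = vlynq_enable_device(dev);
	if (err)
		return err;
	/* dev->mem_start..dev->mem_end and vlynq_virq_to_irq() are now
	 * usable for reaching the remote peripheral. */
	return 0;
}

static void example_remove(struct vlynq_device *dev)
{
	vlynq_disable_device(dev);
}

static struct vlynq_driver example_driver = {
	.name = "example",
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return vlynq_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	vlynq_unregister_driver(&example_driver);
}
module_exit(example_exit);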
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06..a43ebec3a7b 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+ pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
/* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
*/
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 524cd1b28ec..81a97cf8f0a 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -36,12 +36,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGSTEAL),
FOR_ALL_ZONES(PGSCAN_KSWAPD),
FOR_ALL_ZONES(PGSCAN_DIRECT),
+#ifdef CONFIG_NUMA
+ PGSCAN_ZONE_RECLAIM_FAILED,
+#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
@@ -50,7 +52,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
UNEVICTABLE_MLOCKFREED,
-#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
index 9797fec7748..3adeff82212 100644
--- a/include/linux/w1-gpio.h
+++ b/include/linux/w1-gpio.h
@@ -18,6 +18,7 @@
struct w1_gpio_platform_data {
unsigned int pin;
unsigned int is_open_drain:1;
+ void (*enable_external_pullup)(int enable);
};
#endif /* _LINUX_W1_GPIO_H */
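
Board code would wire up the new hook roughly like this; the GPIO numbers are placeholders for a hypothetical board, and the strong pull-up use case (e.g. parasite-powered conversions) is only an example.

#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/w1-gpio.h>

#define EXAMPLE_W1_GPIO		12
#define EXAMPLE_PULLUP_GPIO	13

/* Called by the w1-gpio driver to switch an external pull-up on or
 * off around bus operations that need extra drive strength. */
static void example_w1_enable_pullup(int enable)
{
	gpio_set_value(EXAMPLE_PULLUP_GPIO, enable);
}

static struct w1_gpio_platform_data example_w1_pdata = {
	.pin = EXAMPLE_W1_GPIO,
	.is_open_drain = 0,
	.enable_external_pullup = example_w1_enable_pullup,
};

static struct platform_device example_w1_device = {
	.name = "w1-gpio",
	.id = -1,
	.dev.platform_data = &example_w1_pdata,
};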
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a210ede73b5..6788e1a4d4c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -132,11 +132,12 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
list_del(&old->task_list);
}
-void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int sync, void *key);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
-extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
+ void *key);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -155,21 +156,17 @@ wait_queue_head_t *bit_waitqueue(void *, int);
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
- * macro to avoid include hell
+ * Wakeup macros to be used to report events to the targets.
*/
-#define wake_up_nested(x, s) \
-do { \
- unsigned long flags; \
- \
- spin_lock_irqsave_nested(&(x)->lock, flags, (s)); \
- wake_up_locked(x); \
- spin_unlock_irqrestore(&(x)->lock, flags); \
-} while (0)
-#else
-#define wake_up_nested(x, s) wake_up(x)
-#endif
+#define wake_up_poll(x, m) \
+ __wake_up(x, TASK_NORMAL, 1, (void *) (m))
+#define wake_up_locked_poll(x, m) \
+ __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+#define wake_up_interruptible_poll(x, m) \
+ __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
+#define wake_up_interruptible_sync_poll(x, m) \
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
#define __wait_event(wq, condition) \
do { \
@@ -441,13 +438,15 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-#define DEFINE_WAIT(name) \
+#define DEFINE_WAIT_FUNC(name, function) \
wait_queue_t name = { \
.private = current, \
- .func = autoremove_wake_function, \
+ .func = function, \
.task_list = LIST_HEAD_INIT((name).task_list), \
}
+#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
+
#define DEFINE_WAIT_BIT(name, word, bit) \
struct wait_bit_queue name = { \
.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
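
A short sketch of the additions above: DEFINE_WAIT_FUNC() installs a custom wake function, and the *_poll macros pass the poll mask through the opaque key argument so that function can filter wake-ups. All names besides the wait.h symbols are illustrative.

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

/* Custom wake function: only wake the sleeper if the event key
 * carries POLLIN. */
static int example_wake(wait_queue_t *wait, unsigned mode, int sync,
			void *key)
{
	if (key && !((unsigned long)key & POLLIN))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static void example_wait_for_input(void)
{
	DEFINE_WAIT_FUNC(wait, example_wake);

	prepare_to_wait(&example_wq, &wait, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(&example_wq, &wait);
}

static void example_report_input(void)
{
	/* The poll mask travels as the 'key' and ends up in the wake
	 * function above. */
	wake_up_interruptible_poll(&example_wq, POLLIN);
}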
diff --git a/include/linux/wimax.h b/include/linux/wimax.h
index c89de7f4e5b..4fdcc563551 100644
--- a/include/linux/wimax.h
+++ b/include/linux/wimax.h
@@ -59,7 +59,7 @@ enum {
* M - Major: change if removing or modifying an existing call.
* m - minor: change when adding a new call
*/
- WIMAX_GNL_VERSION = 00,
+ WIMAX_GNL_VERSION = 01,
/* Generic NetLink attributes */
WIMAX_GNL_ATTR_INVALID = 0x00,
WIMAX_GNL_ATTR_MAX = 10,
@@ -78,6 +78,7 @@ enum {
WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */
WIMAX_GNL_OP_RESET, /* Run wimax_rfkill() */
WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */
+ WIMAX_GNL_OP_STATE_GET, /* Request for current state */
};
@@ -113,6 +114,10 @@ enum {
WIMAX_GNL_RESET_IFIDX = 1,
};
+/* Attributes for wimax_state_get() */
+enum {
+ WIMAX_GNL_STGET_IFIDX = 1,
+};
/*
* Attributes for the Report State Change
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
index ba0c49399a8..c703e034042 100644
--- a/include/linux/wimax/debug.h
+++ b/include/linux/wimax/debug.h
@@ -178,7 +178,7 @@ void __d_head(char *head, size_t head_size,
WARN_ON(1);
} else
snprintf(head, head_size, "%s %s: ",
- dev_driver_string(dev), dev->bus_id);
+ dev_driver_string(dev), dev_name(dev));
}
diff --git a/include/linux/wimax/i2400m.h b/include/linux/wimax/i2400m.h
index 74198f5bb4d..433693ef2bb 100644
--- a/include/linux/wimax/i2400m.h
+++ b/include/linux/wimax/i2400m.h
@@ -207,6 +207,7 @@ enum i2400m_pt {
I2400M_PT_TRACE, /* For device debug */
I2400M_PT_RESET_WARM, /* device reset */
I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */
+ I2400M_PT_EDATA, /* Extended RX data */
I2400M_PT_ILLEGAL
};
@@ -221,9 +222,51 @@ struct i2400m_pl_data_hdr {
} __attribute__((packed));
+/*
+ * Payload for an extended data packet
+ *
+ * New in fw v1.4
+ *
+ * @reorder: whether this payload has to be reordered or not (and how)
+ * @cs: the type of data in the packet, as defined per (802.16e
+ * T11.13.19.1). Currently only 2 (IPv4 packet) is supported.
+ *
+ * This is prefixed to each and every INCOMING DATA packet.
+ */
+struct i2400m_pl_edata_hdr {
+ __le32 reorder; /* bits defined in i2400m_ro */
+ __u8 cs;
+ __u8 reserved[11];
+} __attribute__((packed));
+
+enum i2400m_cs {
+ I2400M_CS_IPV4_0 = 0,
+ I2400M_CS_IPV4 = 2,
+};
+
+enum i2400m_ro {
+ I2400M_RO_NEEDED = 0x01,
+ I2400M_RO_TYPE = 0x03,
+ I2400M_RO_TYPE_SHIFT = 1,
+ I2400M_RO_CIN = 0x0f,
+ I2400M_RO_CIN_SHIFT = 4,
+ I2400M_RO_FBN = 0x07ff,
+ I2400M_RO_FBN_SHIFT = 8,
+ I2400M_RO_SN = 0x07ff,
+ I2400M_RO_SN_SHIFT = 21,
+};
+
+enum i2400m_ro_type {
+ I2400M_RO_TYPE_RESET = 0,
+ I2400M_RO_TYPE_PACKET,
+ I2400M_RO_TYPE_WS,
+ I2400M_RO_TYPE_PACKET_WS,
+};
+
+
/* Misc constants */
enum {
- I2400M_PL_PAD = 16, /* Payload data size alignment */
+ I2400M_PL_ALIGN = 16, /* Payload data size alignment */
I2400M_PL_SIZE_MAX = 0x3EFF,
I2400M_MAX_PLS_IN_MSG = 60,
/* protocol barkers: sync sequences; for notifications they
@@ -381,6 +424,9 @@ enum i2400m_tlv {
I2400M_TLV_RF_STATUS = 163,
I2400M_TLV_DEVICE_RESET_TYPE = 132,
I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
+ I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
+ I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
+ I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
};
@@ -509,4 +555,27 @@ struct i2400m_tlv_media_status {
__le32 media_status;
} __attribute__((packed));
+
+/* New in v1.4 */
+struct i2400m_tlv_config_idle_timeout {
+ struct i2400m_tlv_hdr hdr;
+ __le32 timeout; /* 100 to 300000 ms [5min], 100 increments
+ * 0 disabled */
+} __attribute__((packed));
+
+/* New in v1.4 -- for backward compat, will be removed */
+struct i2400m_tlv_config_d2h_data_format {
+ struct i2400m_tlv_hdr hdr;
+ __u8 format; /* 0 old format, 1 enhanced */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+/* New in v1.4 */
+struct i2400m_tlv_config_dl_host_reorder {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reorder; /* 0 disabled, 1 enabled */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+
#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
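
A sketch of decoding the reorder word carried by the new extended data header, using the i2400m_ro masks and shifts added above (shift first, then mask); the helper name is illustrative.

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/wimax/i2400m.h>

static void example_parse_edata(const struct i2400m_pl_edata_hdr *hdr)
{
	u32 reorder = le32_to_cpu(hdr->reorder);
	unsigned needed = reorder & I2400M_RO_NEEDED;
	unsigned type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
	unsigned cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
	unsigned fbn = (reorder >> I2400M_RO_FBN_SHIFT) & I2400M_RO_FBN;
	unsigned sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;

	pr_debug("edata: cs %u reorder %u type %u cin %u fbn %u sn %u\n",
		 hdr->cs, needed, type, cin, fbn, sn);
}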
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index d7958f9b52c..cb24204851f 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -577,18 +577,22 @@
#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8
#define IW_AUTH_ROAMING_CONTROL 9
#define IW_AUTH_PRIVACY_INVOKED 10
+#define IW_AUTH_CIPHER_GROUP_MGMT 11
+#define IW_AUTH_MFP 12
/* IW_AUTH_WPA_VERSION values (bit field) */
#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001
#define IW_AUTH_WPA_VERSION_WPA 0x00000002
#define IW_AUTH_WPA_VERSION_WPA2 0x00000004
-/* IW_AUTH_PAIRWISE_CIPHER and IW_AUTH_GROUP_CIPHER values (bit field) */
+/* IW_AUTH_PAIRWISE_CIPHER, IW_AUTH_GROUP_CIPHER, and IW_AUTH_CIPHER_GROUP_MGMT
+ * values (bit field) */
#define IW_AUTH_CIPHER_NONE 0x00000001
#define IW_AUTH_CIPHER_WEP40 0x00000002
#define IW_AUTH_CIPHER_TKIP 0x00000004
#define IW_AUTH_CIPHER_CCMP 0x00000008
#define IW_AUTH_CIPHER_WEP104 0x00000010
+#define IW_AUTH_CIPHER_AES_CMAC 0x00000020
/* IW_AUTH_KEY_MGMT values (bit field) */
#define IW_AUTH_KEY_MGMT_802_1X 1
@@ -604,6 +608,11 @@
#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming
* control */
+/* IW_AUTH_MFP (management frame protection) values */
+#define IW_AUTH_MFP_DISABLED 0 /* MFP disabled */
+#define IW_AUTH_MFP_OPTIONAL 1 /* MFP optional */
+#define IW_AUTH_MFP_REQUIRED 2 /* MFP required */
+
/* SIOCSIWENCODEEXT definitions */
#define IW_ENCODE_SEQ_MAX_SIZE 8
/* struct iw_encode_ext ->alg */
@@ -612,6 +621,7 @@
#define IW_ENCODE_ALG_TKIP 2
#define IW_ENCODE_ALG_CCMP 3
#define IW_ENCODE_ALG_PMK 4
+#define IW_ENCODE_ALG_AES_CMAC 5
/* struct iw_encode_ext ->ext_flags */
#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
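
From user space, the new IW_AUTH_MFP parameter is set like any other SIOCSIWAUTH parameter; this sketch assumes the usual wireless-extensions flags/IW_AUTH_INDEX convention, and the interface name and error handling are placeholders.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

/* Ask the driver for mandatory management frame protection on a
 * hypothetical "wlan0". */
static int example_require_mfp(int sock)
{
	struct iwreq iwr;

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, "wlan0", IFNAMSIZ);
	iwr.u.param.flags = IW_AUTH_MFP & IW_AUTH_INDEX;
	iwr.u.param.value = IW_AUTH_MFP_REQUIRED;

	return ioctl(sock, SIOCSIWAUTH, &iwr);
}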
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 3cd51e579ab..13e1adf55c4 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -41,6 +41,11 @@ struct delayed_work {
struct timer_list timer;
};
+static inline struct delayed_work *to_delayed_work(struct work_struct *work)
+{
+ return container_of(work, struct delayed_work, work);
+}
+
struct execute_work {
struct work_struct work;
};
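
The typical use of the new to_delayed_work() helper: a work handler only receives the embedded work_struct, so the helper recovers the delayed_work before container_of() reaches the driver structure. The struct and field names here are illustrative.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_device {
	struct delayed_work poll_work;
	int pending;
};

static void example_poll(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct example_device *dev =
		container_of(dwork, struct example_device, poll_work);

	dev->pending = 0;
	/* Re-arm: poll again in one second. */
	schedule_delayed_work(&dev->poll_work, HZ);
}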
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 7300ecdc480..3224820c851 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,7 +79,6 @@ struct writeback_control {
void writeback_inodes(struct writeback_control *wbc);
int inode_wait(void *);
void sync_inodes_sb(struct super_block *, int wait);
-void sync_inodes(int wait);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -109,8 +108,8 @@ extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
-extern int dirty_writeback_interval;
-extern int dirty_expire_interval;
+extern unsigned int dirty_writeback_interval;
+extern unsigned int dirty_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 52f3abd453a..2d4ec15abac 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -58,7 +58,7 @@ struct xfrm_selector
__u8 prefixlen_s;
__u8 proto;
int ifindex;
- uid_t user;
+ __kernel_uid32_t user;
};
#define XFRM_INF (~(__u64)0)