-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  arch/i386/Kconfig | 2
-rw-r--r--  arch/i386/Makefile | 3
-rw-r--r--  arch/i386/boot/compressed/relocs.c | 2
-rw-r--r--  arch/i386/kernel/asm-offsets.c | 9
-rw-r--r--  arch/i386/kernel/entry.S | 85
-rw-r--r--  arch/i386/kernel/head.S | 5
-rw-r--r--  arch/i386/kernel/paravirt.c | 37
-rw-r--r--  arch/i386/kernel/setup.c | 2
-rw-r--r--  arch/i386/kernel/smp.c | 5
-rw-r--r--  arch/i386/kernel/smpboot.c | 8
-rw-r--r--  arch/i386/kernel/syscall_table.S | 1
-rw-r--r--  arch/i386/kernel/tsc.c | 23
-rw-r--r--  arch/i386/kernel/vmi.c | 4
-rw-r--r--  arch/i386/kernel/vmiclock.c | 6
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/i386/kernel/vsyscall-note.S | 49
-rw-r--r--  arch/i386/mach-voyager/voyager_thread.c | 2
-rw-r--r--  arch/i386/mm/init.c | 3
-rw-r--r--  arch/i386/mm/pageattr.c | 2
-rw-r--r--  arch/i386/xen/Kconfig | 11
-rw-r--r--  arch/i386/xen/Makefile | 4
-rw-r--r--  arch/i386/xen/enlighten.c | 1144
-rw-r--r--  arch/i386/xen/events.c | 590
-rw-r--r--  arch/i386/xen/features.c | 29
-rw-r--r--  arch/i386/xen/manage.c | 143
-rw-r--r--  arch/i386/xen/mmu.c | 564
-rw-r--r--  arch/i386/xen/mmu.h | 60
-rw-r--r--  arch/i386/xen/multicalls.c | 90
-rw-r--r--  arch/i386/xen/multicalls.h | 45
-rw-r--r--  arch/i386/xen/setup.c | 96
-rw-r--r--  arch/i386/xen/smp.c | 404
-rw-r--r--  arch/i386/xen/time.c | 590
-rw-r--r--  arch/i386/xen/xen-asm.S | 291
-rw-r--r--  arch/i386/xen/xen-head.S | 36
-rw-r--r--  arch/i386/xen/xen-ops.h | 71
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 7
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 12
-rw-r--r--  arch/powerpc/kernel/sys_ppc32.c | 7
-rw-r--r--  arch/sparc/Kconfig | 3
-rw-r--r--  arch/sparc64/Kconfig | 3
-rw-r--r--  arch/sparc64/kernel/ds.c | 19
-rw-r--r--  arch/sparc64/kernel/mdesc.c | 78
-rw-r--r--  arch/sparc64/kernel/vio.c | 94
-rw-r--r--  arch/sparc64/kernel/viohs.c | 30
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 1
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c | 8
-rw-r--r--  arch/x86_64/kernel/early_printk.c | 5
-rw-r--r--  arch/x86_64/kernel/mce.c | 2
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/thermal.c | 24
-rw-r--r--  drivers/atm/Kconfig | 2
-rw-r--r--  drivers/atm/eni.c | 19
-rw-r--r--  drivers/atm/firestream.c | 14
-rw-r--r--  drivers/atm/idt77252.c | 6
-rw-r--r--  drivers/atm/lanai.c | 4
-rw-r--r--  drivers/atm/nicstarmac.c | 2
-rw-r--r--  drivers/block/Kconfig | 9
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/sunvdc.c | 97
-rw-r--r--  drivers/block/xen-blkfront.c | 988
-rw-r--r--  drivers/char/Kconfig | 8
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/hvc_xen.c | 159
-rw-r--r--  drivers/macintosh/therm_pm72.c | 3
-rw-r--r--  drivers/macintosh/windfarm_core.c | 3
-rw-r--r--  drivers/mtd/ubi/build.c | 25
-rw-r--r--  drivers/mtd/ubi/cdev.c | 49
-rw-r--r--  drivers/mtd/ubi/debug.c | 44
-rw-r--r--  drivers/mtd/ubi/debug.h | 2
-rw-r--r--  drivers/mtd/ubi/eba.c | 98
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 27
-rw-r--r--  drivers/mtd/ubi/io.c | 65
-rw-r--r--  drivers/mtd/ubi/kapi.c | 19
-rw-r--r--  drivers/mtd/ubi/misc.c | 4
-rw-r--r--  drivers/mtd/ubi/scan.c | 127
-rw-r--r--  drivers/mtd/ubi/scan.h | 2
-rw-r--r--  drivers/mtd/ubi/ubi.h | 3
-rw-r--r--  drivers/mtd/ubi/upd.c | 4
-rw-r--r--  drivers/mtd/ubi/vmt.c | 53
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 85
-rw-r--r--  drivers/mtd/ubi/wl.c | 93
-rw-r--r--  drivers/net/Kconfig | 12
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/bnx2.c | 103
-rw-r--r--  drivers/net/bnx2.h | 10
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 2
-rw-r--r--  drivers/net/pppol2tp.c | 18
-rw-r--r--  drivers/net/sunvnet.c | 260
-rw-r--r--  drivers/net/sunvnet.h | 4
-rw-r--r--  drivers/net/xen-netfront.c | 1863
-rw-r--r--  drivers/pnp/pnpbios/core.c | 2
-rw-r--r--  drivers/sbus/char/bbc_envctrl.c | 5
-rw-r--r--  drivers/sbus/char/envctrl.c | 7
-rw-r--r--  drivers/xen/Makefile | 2
-rw-r--r--  drivers/xen/grant-table.c | 582
-rw-r--r--  drivers/xen/xenbus/Makefile | 7
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 569
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.c | 233
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.h | 46
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 935
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.h | 74
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 861
-rw-r--r--  fs/Kconfig | 10
-rw-r--r--  fs/ext4/balloc.c | 4
-rw-r--r--  fs/ext4/extents.c | 682
-rw-r--r--  fs/ext4/file.c | 1
-rw-r--r--  fs/ext4/ialloc.c | 8
-rw-r--r--  fs/ext4/inode.c | 118
-rw-r--r--  fs/ext4/ioctl.c | 9
-rw-r--r--  fs/ext4/namei.c | 76
-rw-r--r--  fs/ext4/super.c | 50
-rw-r--r--  fs/ext4/xattr.c | 276
-rw-r--r--  fs/ext4/xattr.h | 17
-rw-r--r--  fs/jbd2/journal.c | 79
-rw-r--r--  fs/jbd2/recovery.c | 2
-rw-r--r--  fs/ocfs2/heartbeat.c | 2
-rw-r--r--  fs/open.c | 59
-rw-r--r--  include/asm-i386/irq.h | 1
-rw-r--r--  include/asm-i386/mach-default/irq_vectors_limits.h | 2
-rw-r--r--  include/asm-i386/mmu_context.h | 2
-rw-r--r--  include/asm-i386/paravirt.h | 22
-rw-r--r--  include/asm-i386/pgalloc.h | 6
-rw-r--r--  include/asm-i386/setup.h | 4
-rw-r--r--  include/asm-i386/smp.h | 5
-rw-r--r--  include/asm-i386/timer.h | 32
-rw-r--r--  include/asm-i386/unistd.h | 3
-rw-r--r--  include/asm-i386/vmi_time.h | 2
-rw-r--r--  include/asm-i386/xen/hypercall.h | 413
-rw-r--r--  include/asm-i386/xen/hypervisor.h | 73
-rw-r--r--  include/asm-i386/xen/interface.h | 188
-rw-r--r--  include/asm-powerpc/systbl.h | 1
-rw-r--r--  include/asm-powerpc/unistd.h | 3
-rw-r--r--  include/asm-sparc64/io.h | 5
-rw-r--r--  include/asm-sparc64/mdesc.h | 10
-rw-r--r--  include/asm-sparc64/vio.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 2
-rw-r--r--  include/linux/elfnote.h | 22
-rw-r--r--  include/linux/ext4_fs.h | 104
-rw-r--r--  include/linux/ext4_fs_extents.h | 43
-rw-r--r--  include/linux/ext4_fs_i.h | 5
-rw-r--r--  include/linux/ext4_fs_sb.h | 3
-rw-r--r--  include/linux/falloc.h | 6
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/jbd2.h | 6
-rw-r--r--  include/linux/kmod.h | 52
-rw-r--r--  include/linux/major.h | 2
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/netfilter_ipv4/ipt_iprange.h | 2
-rw-r--r--  include/linux/page-flags.h | 5
-rw-r--r--  include/linux/reboot.h | 5
-rw-r--r--  include/linux/string.h | 4
-rw-r--r--  include/linux/syscalls.h | 1
-rw-r--r--  include/linux/vmalloc.h | 4
-rw-r--r--  include/mtd/ubi-header.h | 101
-rw-r--r--  include/net/tcp.h | 6
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  include/xen/events.h | 48
-rw-r--r--  include/xen/features.h | 23
-rw-r--r--  include/xen/grant_table.h | 107
-rw-r--r--  include/xen/hvc-console.h | 6
-rw-r--r--  include/xen/interface/elfnote.h | 133
-rw-r--r--  include/xen/interface/event_channel.h | 195
-rw-r--r--  include/xen/interface/features.h | 43
-rw-r--r--  include/xen/interface/grant_table.h | 375
-rw-r--r--  include/xen/interface/io/blkif.h | 94
-rw-r--r--  include/xen/interface/io/console.h | 23
-rw-r--r--  include/xen/interface/io/netif.h | 158
-rw-r--r--  include/xen/interface/io/ring.h | 260
-rw-r--r--  include/xen/interface/io/xenbus.h | 44
-rw-r--r--  include/xen/interface/io/xs_wire.h | 87
-rw-r--r--  include/xen/interface/memory.h | 145
-rw-r--r--  include/xen/interface/physdev.h | 145
-rw-r--r--  include/xen/interface/sched.h | 77
-rw-r--r--  include/xen/interface/vcpu.h | 167
-rw-r--r--  include/xen/interface/version.h | 60
-rw-r--r--  include/xen/interface/xen.h | 447
-rw-r--r--  include/xen/page.h | 179
-rw-r--r--  include/xen/xenbus.h | 234
-rw-r--r--  kernel/cpuset.c | 2
-rw-r--r--  kernel/kmod.c | 216
-rw-r--r--  kernel/sys.c | 58
-rw-r--r--  kernel/sysctl.c | 10
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/argv_split.c | 105
-rw-r--r--  lib/kobject_uevent.c | 2
-rw-r--r--  mm/util.c | 26
-rw-r--r--  mm/vmalloc.c | 53
-rw-r--r--  net/atm/br2684.c | 4
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/core/dev.c | 38
-rw-r--r--  net/core/dev_mcast.c | 12
-rw-r--r--  net/core/gen_estimator.c | 81
-rw-r--r--  net/ipv4/tcp_bic.c | 2
-rw-r--r--  net/ipv4/tcp_cong.c | 3
-rw-r--r--  net/ipv4/tcp_cubic.c | 2
-rw-r--r--  net/ipv4/tcp_highspeed.c | 2
-rw-r--r--  net/ipv4/tcp_htcp.c | 2
-rw-r--r--  net/ipv4/tcp_hybla.c | 4
-rw-r--r--  net/ipv4/tcp_illinois.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 8
-rw-r--r--  net/ipv4/tcp_lp.c | 5
-rw-r--r--  net/ipv4/tcp_scalable.c | 2
-rw-r--r--  net/ipv4/tcp_vegas.c | 6
-rw-r--r--  net/ipv4/tcp_veno.c | 6
-rw-r--r--  net/ipv4/tcp_yeah.c | 2
-rw-r--r--  net/irda/af_irda.c | 2
-rw-r--r--  net/irda/irda_device.c | 4
-rw-r--r--  net/irda/iriap.c | 2
-rw-r--r--  net/irda/irias_object.c | 43
-rw-r--r--  net/irda/irlap.c | 2
-rw-r--r--  net/irda/irlmp.c | 2
-rw-r--r--  net/irda/irproc.c | 2
-rw-r--r--  net/irda/irsysctl.c | 2
-rw-r--r--  net/irda/irttp.c | 2
-rw-r--r--  net/netfilter/Kconfig | 1
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/sched/Kconfig | 6
-rw-r--r--  net/sched/sch_atm.c | 3
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
-rw-r--r--  security/keys/request_key.c | 3
221 files changed, 17347 insertions(+), 1313 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index a9615a567da..1698dbbc737 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2393,7 +2393,7 @@ P: Artem Bityutskiy
M: dedekind@infradead.org
W: http://www.linux-mtd.infradead.org/
L: linux-mtd@lists.infradead.org
-T: git git://git.infradead.org/ubi-2.6.git
+T: git git://git.infradead.org/~dedekind/ubi-2.6.git
S: Maintained
MICROTEK X6 SCANNER
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index c7c9c2a15fa..7a11b905ef4 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -222,6 +222,8 @@ config PARAVIRT
However, when run without a hypervisor the kernel is
theoretically slower. If in doubt, say N.
+source "arch/i386/xen/Kconfig"
+
config VMI
bool "VMI Paravirt-ops support"
depends on PARAVIRT
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 181cc29a7c4..01f0ff0daaf 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -93,6 +93,9 @@ mflags-$(CONFIG_X86_ES7000) := -Iinclude/asm-i386/mach-es7000
mcore-$(CONFIG_X86_ES7000) := mach-default
core-$(CONFIG_X86_ES7000) := arch/i386/mach-es7000/
+# Xen paravirtualization support
+core-$(CONFIG_XEN) += arch/i386/xen/
+
# default subarch .h files
mflags-y += -Iinclude/asm-i386/mach-default
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
index ce4fda261aa..b0e21c3cee5 100644
--- a/arch/i386/boot/compressed/relocs.c
+++ b/arch/i386/boot/compressed/relocs.c
@@ -31,6 +31,8 @@ static const char* safe_abs_relocs[] = {
"__kernel_rt_sigreturn",
"__kernel_sigreturn",
"SYSENTER_RETURN",
+ "xen_irq_disable_direct_reloc",
+ "xen_save_fl_direct_reloc",
};
static int is_safe_abs_reloc(const char* sym_name)
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 27a776c9044..25f7eb51392 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
#include <asm/thread_info.h>
#include <asm/elf.h>
+#include <xen/interface/xen.h>
+
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -59,6 +61,7 @@ void foo(void)
OFFSET(TI_addr_limit, thread_info, addr_limit);
OFFSET(TI_restart_block, thread_info, restart_block);
OFFSET(TI_sysenter_return, thread_info, sysenter_return);
+ OFFSET(TI_cpu, thread_info, cpu);
BLANK();
OFFSET(GDS_size, Xgt_desc_struct, size);
@@ -115,4 +118,10 @@ void foo(void)
OFFSET(PARAVIRT_iret, paravirt_ops, iret);
OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
#endif
+
+#ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+ OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
+#endif
}
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 3c3c220488c..32980b83493 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -1023,6 +1023,91 @@ ENTRY(kernel_thread_helper)
CFI_ENDPROC
ENDPROC(kernel_thread_helper)
+#ifdef CONFIG_XEN
+ENTRY(xen_hypervisor_callback)
+ CFI_STARTPROC
+ pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ TRACE_IRQS_OFF
+
+ /* Check to see if we got the event in the critical
+ region in xen_iret_direct, after we've reenabled
+ events and checked for pending events. This simulates
+ iret instruction's behaviour where it delivers a
+ pending interrupt when enabling interrupts. */
+ movl PT_EIP(%esp),%eax
+ cmpl $xen_iret_start_crit,%eax
+ jb 1f
+ cmpl $xen_iret_end_crit,%eax
+ jae 1f
+
+ call xen_iret_crit_fixup
+
+1: mov %esp, %eax
+ call xen_evtchn_do_upcall
+ jmp ret_from_intr
+ CFI_ENDPROC
+ENDPROC(xen_hypervisor_callback)
+
+# Hypervisor uses this for application faults while it executes.
+# We get here for two reasons:
+# 1. Fault while reloading DS, ES, FS or GS
+# 2. Fault while executing IRET
+# Category 1 we fix up by reattempting the load, and zeroing the segment
+# register if the load fails.
+# Category 2 we fix up by jumping to do_iret_error. We cannot use the
+# normal Linux return path in this case because if we use the IRET hypercall
+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+# We distinguish between categories by maintaining a status value in EAX.
+ENTRY(xen_failsafe_callback)
+ CFI_STARTPROC
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ movl $1,%eax
+1: mov 4(%esp),%ds
+2: mov 8(%esp),%es
+3: mov 12(%esp),%fs
+4: mov 16(%esp),%gs
+ testl %eax,%eax
+ popl %eax
+ CFI_ADJUST_CFA_OFFSET -4
+ lea 16(%esp),%esp
+ CFI_ADJUST_CFA_OFFSET -16
+ jz 5f
+ addl $16,%esp
+ jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
+5: pushl $0 # EAX == 0 => Category 1 (Bad segment)
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ jmp ret_from_exception
+ CFI_ENDPROC
+
+.section .fixup,"ax"
+6: xorl %eax,%eax
+ movl %eax,4(%esp)
+ jmp 1b
+7: xorl %eax,%eax
+ movl %eax,8(%esp)
+ jmp 2b
+8: xorl %eax,%eax
+ movl %eax,12(%esp)
+ jmp 3b
+9: xorl %eax,%eax
+ movl %eax,16(%esp)
+ jmp 4b
+.previous
+.section __ex_table,"a"
+ .align 4
+ .long 1b,6b
+ .long 2b,7b
+ .long 3b,8b
+ .long 4b,9b
+.previous
+ENDPROC(xen_failsafe_callback)
+
+#endif /* CONFIG_XEN */
+
.section .rodata,"a"
#include "syscall_table.S"
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 82714668d43..7c52b222207 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -510,7 +510,8 @@ ENTRY(_stext)
/*
* BSS section
*/
-.section ".bss.page_aligned","w"
+.section ".bss.page_aligned","wa"
+ .align PAGE_SIZE_asm
ENTRY(swapper_pg_dir)
.fill 1024,4,0
ENTRY(swapper_pg_pmd)
@@ -538,6 +539,8 @@ fault_msg:
.ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n"
.asciz "Stack: %p %p %p %p %p %p %p %p\n"
+#include "../xen/xen-head.S"
+
/*
* The IDT and GDT 'descriptors' are a strange 48-bit object
* only used by the lidt and lgdt instructions. They are not
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index faab09abca5..53f07a8275e 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -228,6 +228,41 @@ static int __init print_banner(void)
}
core_initcall(print_banner);
+static struct resource reserve_ioports = {
+ .start = 0,
+ .end = IO_SPACE_LIMIT,
+ .name = "paravirt-ioport",
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY,
+};
+
+static struct resource reserve_iomem = {
+ .start = 0,
+ .end = -1,
+ .name = "paravirt-iomem",
+ .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+};
+
+/*
+ * Reserve the whole legacy IO space to prevent any legacy drivers
+ * from wasting time probing for their hardware. This is a fairly
+ * brute-force approach to disabling all non-virtual drivers.
+ *
+ * Note that this must be called very early to have any effect.
+ */
+int paravirt_disable_iospace(void)
+{
+ int ret;
+
+ ret = request_resource(&ioport_resource, &reserve_ioports);
+ if (ret == 0) {
+ ret = request_resource(&iomem_resource, &reserve_iomem);
+ if (ret)
+ release_resource(&reserve_ioports);
+ }
+
+ return ret;
+}
+
struct paravirt_ops paravirt_ops = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -267,7 +302,7 @@ struct paravirt_ops paravirt_ops = {
.write_msr = native_write_msr_safe,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
- .get_scheduled_cycles = native_read_tsc,
+ .sched_clock = native_sched_clock,
.get_cpu_khz = native_calculate_cpu_khz,
.load_tr_desc = native_load_tr_desc,
.set_ldt = native_set_ldt,
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 2d61e65eeb5..74871d066c2 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -601,6 +601,8 @@ void __init setup_arch(char **cmdline_p)
* NOTE: at this point the bootmem allocator is fully available.
*/
+ paravirt_post_allocator_init();
+
dmi_scan_machine();
#ifdef CONFIG_X86_GENERICARCH
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 6299c080f6e..2d35d850202 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -22,6 +22,7 @@
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
#include <mach_apic.h>
/*
@@ -249,13 +250,13 @@ static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
/*
- * We cannot call mmdrop() because we are in interrupt context,
+ * We cannot call mmdrop() because we are in interrupt context,
* instead update mm->cpu_vm_mask.
*
* We need to reload %cr3 since the page tables may be going
* away from under us..
*/
-static inline void leave_mm (unsigned long cpu)
+void leave_mm(unsigned long cpu)
{
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
BUG();
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 0b2954534b8..5910d3fac56 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -148,7 +148,7 @@ void __init smp_alloc_memory(void)
* a given CPU
*/
-static void __cpuinit smp_store_cpu_info(int id)
+void __cpuinit smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = cpu_data + id;
@@ -308,8 +308,7 @@ cpumask_t cpu_coregroup_map(int cpu)
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
-static inline void
-set_cpu_sibling_map(int cpu)
+void set_cpu_sibling_map(int cpu)
{
int i;
struct cpuinfo_x86 *c = cpu_data;
@@ -1144,8 +1143,7 @@ void __init native_smp_prepare_boot_cpu(void)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void
-remove_siblinginfo(int cpu)
+void remove_siblinginfo(int cpu)
{
int sibling;
struct cpuinfo_x86 *c = cpu_data;
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index bf6adce5226..8344c70adf6 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -323,3 +323,4 @@ ENTRY(sys_call_table)
.long sys_signalfd
.long sys_timerfd
.long sys_eventfd
+ .long sys_fallocate
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index ea63a30ca3e..252f9010f28 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -84,7 +84,7 @@ static inline int check_tsc_unstable(void)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static unsigned long cyc2ns_scale __read_mostly;
+unsigned long cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
@@ -93,15 +93,10 @@ static inline void set_cyc2ns_scale(unsigned long cpu_khz)
cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
-
/*
* Scheduler clock - returns current time in nanosec units.
*/
-unsigned long long sched_clock(void)
+unsigned long long native_sched_clock(void)
{
unsigned long long this_offset;
@@ -118,12 +113,24 @@ unsigned long long sched_clock(void)
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
/* read the Time Stamp Counter: */
- get_scheduled_cycles(this_offset);
+ rdtscll(this_offset);
/* return the value in ns */
return cycles_2_ns(this_offset);
}
+/* We need to define a real function for sched_clock, to override the
+ weak default version */
+#ifdef CONFIG_PARAVIRT
+unsigned long long sched_clock(void)
+{
+ return paravirt_sched_clock();
+}
+#else
+unsigned long long sched_clock(void)
+ __attribute__((alias("native_sched_clock")));
+#endif
+
unsigned long native_calculate_cpu_khz(void)
{
unsigned long long start, end;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index c12720d7cbc..72042bb7ec9 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -362,7 +362,7 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
}
#endif
-static void vmi_allocate_pt(u32 pfn)
+static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -891,7 +891,7 @@ static inline int __init activate_vmi(void)
paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
- paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+ paravirt_ops.sched_clock = vmi_sched_clock;
paravirt_ops.get_cpu_khz = vmi_cpu_khz;
/* We have true wallclock functions; disable CMOS clock sync */
diff --git a/arch/i386/kernel/vmiclock.c b/arch/i386/kernel/vmiclock.c
index 26a37f8a876..f9b845f4e69 100644
--- a/arch/i386/kernel/vmiclock.c
+++ b/arch/i386/kernel/vmiclock.c
@@ -64,10 +64,10 @@ int vmi_set_wallclock(unsigned long now)
return 0;
}
-/* paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles */
-unsigned long long vmi_get_sched_cycles(void)
+/* paravirt_ops.sched_clock = vmi_sched_clock */
+unsigned long long vmi_sched_clock(void)
{
- return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
+ return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
}
/* paravirt_ops.get_cpu_khz = vmi_cpu_khz */
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index aa87b06c7c8..00f1bc47d3a 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -88,6 +88,7 @@ SECTIONS
. = ALIGN(4096);
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
+ *(.data.page_aligned)
*(.data.idt)
}
diff --git a/arch/i386/kernel/vsyscall-note.S b/arch/i386/kernel/vsyscall-note.S
index d4b5be4f3d5..271f16a8ca0 100644
--- a/arch/i386/kernel/vsyscall-note.S
+++ b/arch/i386/kernel/vsyscall-note.S
@@ -3,23 +3,40 @@
* Here we can supply some information useful to userland.
*/
-#include <linux/uts.h>
#include <linux/version.h>
+#include <linux/elfnote.h>
-#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
- .section name, flags; \
- .balign 4; \
- .long 1f - 0f; /* name length */ \
- .long 3f - 2f; /* data length */ \
- .long type; /* note type */ \
-0: .asciz vendor; /* vendor name */ \
-1: .balign 4; \
-2:
+/* Ideally this would use UTS_NAME, but using a quoted string here
+ doesn't work. Remember to change this when changing the
+ kernel's name. */
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
-#define ASM_ELF_NOTE_END \
-3: .balign 4; /* pad out section */ \
- .previous
+#ifdef CONFIG_XEN
- ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
- .long LINUX_VERSION_CODE
- ASM_ELF_NOTE_END
+/*
+ * Add a special note telling glibc's dynamic linker a fake hardware
+ * flavor that it will use to choose the search path for libraries in the
+ * same way it uses real hardware capabilities like "mmx".
+ * We supply "nosegneg" as the fake capability, to indicate that we
+ * do not like negative offsets in instructions using segment overrides,
+ * since we implement those inefficiently. This makes it possible to
+ * install libraries optimized to avoid those access patterns in someplace
+ * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file
+ * corresponding to the bits here is needed to make ldconfig work right.
+ * It should contain:
+ * hwcap 1 nosegneg
+ * to match the mapping of bit to name that we give here.
+ */
+
+/* Bit used for the pseudo-hwcap for non-negative segments. We use
+ bit 1 to avoid bugs in some versions of glibc when bit 0 is
+ used; the choice is otherwise arbitrary. */
+#define VDSO_NOTE_NONEGSEG_BIT 1
+
+ELFNOTE_START(GNU, 2, "a")
+ .long 1, 1<<VDSO_NOTE_NONEGSEG_BIT /* ncaps, mask */
+ .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */
+ELFNOTE_END
+#endif
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c
index b4b24e0e45e..f9d59533815 100644
--- a/arch/i386/mach-voyager/voyager_thread.c
+++ b/arch/i386/mach-voyager/voyager_thread.c
@@ -52,7 +52,7 @@ execute(const char *string)
NULL,
};
- if ((ret = call_usermodehelper(argv[0], argv, envp, 1)) != 0) {
+ if ((ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC)) != 0) {
printk(KERN_ERR "Voyager failed to run \"%s\": %i\n",
string, ret);
}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 7135946d366..6a68b1ae061 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -87,7 +87,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
+ paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
BUG_ON(page_table != pte_offset_kernel(pmd, 0));
}
@@ -473,6 +473,7 @@ void zap_low_mappings (void)
static int disable_nx __initdata = 0;
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
/*
* noexec = on|off
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 2eb14a73be9..37992ffb163 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -60,7 +60,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
address = __pa(address);
addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base);
- paravirt_alloc_pt(page_to_pfn(base));
+ paravirt_alloc_pt(&init_mm, page_to_pfn(base));
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
addr == address ? prot : ref_prot));
diff --git a/arch/i386/xen/Kconfig b/arch/i386/xen/Kconfig
new file mode 100644
index 00000000000..9df99e1885a
--- /dev/null
+++ b/arch/i386/xen/Kconfig
@@ -0,0 +1,11 @@
+#
+# This Kconfig describes xen options
+#
+
+config XEN
+ bool "Enable support for Xen hypervisor"
+ depends on PARAVIRT && X86_CMPXCHG && X86_TSC && !NEED_MULTIPLE_NODES
+ help
+ This is the Linux Xen port. Enabling this will allow the
+ kernel to boot in a paravirtualized environment under the
+ Xen hypervisor.
diff --git a/arch/i386/xen/Makefile b/arch/i386/xen/Makefile
new file mode 100644
index 00000000000..343df246bd3
--- /dev/null
+++ b/arch/i386/xen/Makefile
@@ -0,0 +1,4 @@
+obj-y := enlighten.o setup.o features.o multicalls.o mmu.o \
+ events.o time.o manage.o xen-asm.o
+
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/i386/xen/enlighten.c b/arch/i386/xen/enlighten.c
new file mode 100644
index 00000000000..9a8c1181c00
--- /dev/null
+++ b/arch/i386/xen/enlighten.c
@@ -0,0 +1,1144 @@
+/*
+ * Core of Xen paravirt_ops implementation.
+ *
+ * This file contains the xen_paravirt_ops structure itself, and the
+ * implementations for:
+ * - privileged instructions
+ * - interrupt flags
+ * - segment operations
+ * - booting and setup
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/preempt.h>
+#include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/start_kernel.h>
+#include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/highmem.h>
+#include <linux/smp.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/vcpu.h>
+#include <xen/interface/sched.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#include <asm/paravirt.h>
+#include <asm/page.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/fixmap.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/reboot.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+#include "multicalls.h"
+
+EXPORT_SYMBOL_GPL(hypercall_page);
+
+DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
+
+DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
+DEFINE_PER_CPU(unsigned long, xen_cr3);
+
+struct start_info *xen_start_info;
+EXPORT_SYMBOL_GPL(xen_start_info);
+
+static /* __initdata */ struct shared_info dummy_shared_info;
+
+/*
+ * Point at some empty memory to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+struct shared_info *HYPERVISOR_shared_info = (void *)&dummy_shared_info;
+
+/*
+ * Flag to determine whether vcpu info placement is available on all
+ * VCPUs. We assume it is to start with, and then set it to zero on
+ * the first failure. This is because it can succeed on some VCPUs
+ * and not others, since it can involve hypervisor memory allocation,
+ * or because the guest failed to guarantee all the appropriate
+ * constraints on all VCPUs (ie buffer can't cross a page boundary).
+ *
+ * Note that any particular CPU may be using a placed vcpu structure,
+ * but we can only optimise if they all are.
+ *
+ * 0: not available, 1: available
+ */
+static int have_vcpu_info_placement = 1;
+
+static void __init xen_vcpu_setup(int cpu)
+{
+ struct vcpu_register_vcpu_info info;
+ int err;
+ struct vcpu_info *vcpup;
+
+ per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+ if (!have_vcpu_info_placement)
+ return; /* already tested, not available */
+
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
+
+ info.mfn = virt_to_mfn(vcpup);
+ info.offset = offset_in_page(vcpup);
+
+ printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %x, offset %d\n",
+ cpu, vcpup, info.mfn, info.offset);
+
+ /* Check to see if the hypervisor will put the vcpu_info
+ structure where we want it, which allows direct access via
+ a percpu-variable. */
+ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
+
+ if (err) {
+ printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
+ have_vcpu_info_placement = 0;
+ } else {
+ /* This cpu is using the registered vcpu info, even if
+ later ones fail to. */
+ per_cpu(xen_vcpu, cpu) = vcpup;
+
+ printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
+ cpu, vcpup);
+ }
+}
+
+static void __init xen_banner(void)
+{
+ printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+ paravirt_ops.name);
+ printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
+}
+
+static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ unsigned maskedx = ~0;
+
+ /*
+ * Mask out inconvenient features, to try and disable as many
+ * unsupported kernel subsystems as possible.
+ */
+ if (*eax == 1)
+ maskedx = ~((1 << X86_FEATURE_APIC) | /* disable APIC */
+ (1 << X86_FEATURE_ACPI) | /* disable ACPI */
+ (1 << X86_FEATURE_ACC)); /* thermal monitoring */
+
+ asm(XEN_EMULATE_PREFIX "cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
+ *edx &= maskedx;
+}
+
+static void xen_set_debugreg(int reg, unsigned long val)
+{
+ HYPERVISOR_set_debugreg(reg, val);
+}
+
+static unsigned long xen_get_debugreg(int reg)
+{
+ return HYPERVISOR_get_debugreg(reg);
+}
+
+static unsigned long xen_save_fl(void)
+{
+ struct vcpu_info *vcpu;
+ unsigned long flags;
+
+ vcpu = x86_read_percpu(xen_vcpu);
+
+ /* flag has opposite sense of mask */
+ flags = !vcpu->evtchn_upcall_mask;
+
+ /* convert to IF type flag
+ -0 -> 0x00000000
+ -1 -> 0xffffffff
+ */
+ return (-flags) & X86_EFLAGS_IF;
+}
+
+static void xen_restore_fl(unsigned long flags)
+{
+ struct vcpu_info *vcpu;
+
+ /* convert from IF type flag */
+ flags = !(flags & X86_EFLAGS_IF);
+
+ /* There's a one instruction preempt window here. We need to
+ make sure we don't switch CPUs between getting the vcpu
+ pointer and updating the mask. */
+ preempt_disable();
+ vcpu = x86_read_percpu(xen_vcpu);
+ vcpu->evtchn_upcall_mask = flags;
+ preempt_enable_no_resched();
+
+ /* Doesn't matter if we get preempted here, because any
+ pending event will get dealt with anyway. */
+
+ if (flags == 0) {
+ preempt_check_resched();
+ barrier(); /* unmask then check (avoid races) */
+ if (unlikely(vcpu->evtchn_upcall_pending))
+ force_evtchn_callback();
+ }
+}
+
+static void xen_irq_disable(void)
+{
+ /* There's a one instruction preempt window here. We need to
+ make sure we don't switch CPUs between getting the vcpu
+ pointer and updating the mask. */
+ preempt_disable();
+ x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+ preempt_enable_no_resched();
+}
+
+static void xen_irq_enable(void)
+{
+ struct vcpu_info *vcpu;
+
+ /* There's a one instruction preempt window here. We need to
+ make sure we don't switch CPUs between getting the vcpu
+ pointer and updating the mask. */
+ preempt_disable();
+ vcpu = x86_read_percpu(xen_vcpu);
+ vcpu->evtchn_upcall_mask = 0;
+ preempt_enable_no_resched();
+
+ /* Doesn't matter if we get preempted here, because any
+ pending event will get dealt with anyway. */
+
+ barrier(); /* unmask then check (avoid races) */
+ if (unlikely(vcpu->evtchn_upcall_pending))
+ force_evtchn_callback();
+}
+
+static void xen_safe_halt(void)
+{
+ /* Blocking includes an implicit local_irq_enable(). */
+ if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
+ BUG();
+}
+
+static void xen_halt(void)
+{
+ if (irqs_disabled())
+ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
+ else
+ xen_safe_halt();
+}
+
+static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
+{
+ BUG_ON(preemptible());
+
+ switch (mode) {
+ case PARAVIRT_LAZY_NONE:
+ BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
+ break;
+
+ case PARAVIRT_LAZY_MMU:
+ case PARAVIRT_LAZY_CPU:
+ BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
+ break;
+
+ case PARAVIRT_LAZY_FLUSH:
+ /* flush if necessary, but don't change state */
+ if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
+ xen_mc_flush();
+ return;
+ }
+
+ xen_mc_flush();
+ x86_write_percpu(xen_lazy_mode, mode);
+}
+
+static unsigned long xen_store_tr(void)
+{
+ return 0;
+}
+
+static void xen_set_ldt(const void *addr, unsigned entries)
+{
+ unsigned long linear_addr = (unsigned long)addr;
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_SET_LDT;
+ if (linear_addr) {
+ /* ldt may be vmalloced, use arbitrary_virt_to_machine */
+ xmaddr_t maddr;
+ maddr = arbitrary_virt_to_machine((unsigned long)addr);
+ linear_addr = (unsigned long)maddr.maddr;
+ }
+ op->arg1.linear_addr = linear_addr;
+ op->arg2.nr_ents = entries;
+
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+ unsigned long *frames;
+ unsigned long va = dtr->address;
+ unsigned int size = dtr->size + 1;
+ unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ int f;
+ struct multicall_space mcs;
+
+ /* A GDT can be up to 64k in size, which corresponds to 8192
+ 8-byte entries, or 16 4k pages.. */
+
+ BUG_ON(size > 65536);
+ BUG_ON(va & ~PAGE_MASK);
+
+ mcs = xen_mc_entry(sizeof(*frames) * pages);
+ frames = mcs.args;
+
+ for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_mfn(va);
+ make_lowmem_page_readonly((void *)va);
+ }
+
+ MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void load_TLS_descriptor(struct thread_struct *t,
+ unsigned int cpu, unsigned int i)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
+ struct multicall_space mc = __xen_mc_entry(0);
+
+ MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
+}
+
+static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+ xen_mc_batch();
+
+ load_TLS_descriptor(t, cpu, 0);
+ load_TLS_descriptor(t, cpu, 1);
+ load_TLS_descriptor(t, cpu, 2);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+ /*
+ * XXX sleazy hack: If we're being called in a lazy-cpu zone,
+ * it means we're in a context switch, and %gs has just been
+ * saved. This means we can zero it out to prevent faults on
+ * exit from the hypervisor if the next process has no %gs.
+ * Either way, it has been saved, and the new value will get
+ * loaded properly. This will go away as soon as Xen has been
+ * modified to not save/restore %gs for normal hypercalls.
+ */
+ if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
+ loadsegment(gs, 0);
+}
+
+static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
+ u32 low, u32 high)
+{
+ unsigned long lp = (unsigned long)&dt[entrynum];
+ xmaddr_t mach_lp = virt_to_machine(lp);
+ u64 entry = (u64)high << 32 | low;
+
+ preempt_disable();
+
+ xen_mc_flush();
+ if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
+ BUG();
+
+ preempt_enable();
+}
+
+static int cvt_gate_to_trap(int vector, u32 low, u32 high,
+ struct trap_info *info)
+{
+ u8 type, dpl;
+
+ type = (high >> 8) & 0x1f;
+ dpl = (high >> 13) & 3;
+
+ if (type != 0xf && type != 0xe)
+ return 0;
+
+ info->vector = vector;
+ info->address = (high & 0xffff0000) | (low & 0x0000ffff);
+ info->cs = low >> 16;
+ info->flags = dpl;
+ /* interrupt gates clear IF */
+ if (type == 0xe)
+ info->flags |= 4;
+
+ return 1;
+}
+
+/* Locations of each CPU's IDT */
+static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);
+
+/* Set an IDT entry. If the entry is part of the current IDT, then
+ also update Xen. */
+static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
+ u32 low, u32 high)
+{
+ unsigned long p = (unsigned long)&dt[entrynum];
+ unsigned long start, end;
+
+ preempt_disable();
+
+ start = __get_cpu_var(idt_desc).address;
+ end = start + __get_cpu_var(idt_desc).size + 1;
+
+ xen_mc_flush();
+
+ write_dt_entry(dt, entrynum, low, high);
+
+ if (p >= start && (p + 8) <= end) {
+ struct trap_info info[2];
+
+ info[1].address = 0;
+
+ if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
+ if (HYPERVISOR_set_trap_table(info))
+ BUG();
+ }
+
+ preempt_enable();
+}
+
+static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
+ struct trap_info *traps)
+{
+ unsigned in, out, count;
+
+ count = (desc->size+1) / 8;
+ BUG_ON(count > 256);
+
+ for (in = out = 0; in < count; in++) {
+ const u32 *entry = (u32 *)(desc->address + in * 8);
+
+ if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
+ out++;
+ }
+ traps[out].address = 0;
+}
+
+void xen_copy_trap_info(struct trap_info *traps)
+{
+ const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);
+
+ xen_convert_trap_info(desc, traps);
+}
+
+/* Load a new IDT into Xen. In principle this can be per-CPU, so we
+ hold a spinlock to protect the static traps[] array (static because
+ it avoids allocation, and saves stack space). */
+static void xen_load_idt(const struct Xgt_desc_struct *desc)
+{
+ static DEFINE_SPINLOCK(lock);
+ static struct trap_info traps[257];
+
+ spin_lock(&lock);
+
+ __get_cpu_var(idt_desc) = *desc;
+
+ xen_convert_trap_info(desc, traps);
+
+ xen_mc_flush();
+ if (HYPERVISOR_set_trap_table(traps))
+ BUG();
+
+ spin_unlock(&lock);
+}
+
+/* Write a GDT descriptor entry. Ignore LDT descriptors, since
+ they're handled differently. */
+static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
+ u32 low, u32 high)
+{
+ preempt_disable();
+
+ switch ((high >> 8) & 0xff) {
+ case DESCTYPE_LDT:
+ case DESCTYPE_TSS:
+ /* ignore */
+ break;
+
+ default: {
+ xmaddr_t maddr = virt_to_machine(&dt[entry]);
+ u64 desc = (u64)high << 32 | low;
+
+ xen_mc_flush();
+ if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
+ BUG();
+ }
+
+ }
+
+ preempt_enable();
+}
+
+static void xen_load_esp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ struct multicall_space mcs = xen_mc_entry(0);
+ MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+}
+
+static void xen_set_iopl_mask(unsigned mask)
+{
+ struct physdev_set_iopl set_iopl;
+
+ /* Force the change at ring 0. */
+ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+}
+
+static void xen_io_delay(void)
+{
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_apic_read(unsigned long reg)
+{
+ return 0;
+}
+
+static void xen_apic_write(unsigned long reg, unsigned long val)
+{
+ /* Warn to see if there are any stray references */
+ WARN_ON(1);
+}
+#endif
+
+static void xen_flush_tlb(void)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_INVLPG_LOCAL;
+ op->arg1.linear_addr = addr & PAGE_MASK;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
+ unsigned long va)
+{
+ struct {
+ struct mmuext_op op;
+ cpumask_t mask;
+ } *args;
+ cpumask_t cpumask = *cpus;
+ struct multicall_space mcs;
+
+ /*
+ * A couple of (to be removed) sanity checks:
+ *
+ * - current CPU must not be in mask
+ * - mask must exist :)
+ */
+ BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(!mm);
+
+ /* If a CPU which we ran on has gone down, OK. */
+ cpus_and(cpumask, cpumask, cpu_online_map);
+ if (cpus_empty(cpumask))
+ return;
+
+ mcs = xen_mc_entry(sizeof(*args));
+ args = mcs.args;
+ args->mask = cpumask;
+ args->op.arg2.vcpumask = &args->mask;
+
+ if (va == TLB_FLUSH_ALL) {
+ args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+ } else {
+ args->op.cmd = MMUEXT_INVLPG_MULTI;
+ args->op.arg1.linear_addr = va;
+ }
+
+ MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static void xen_write_cr2(unsigned long cr2)
+{
+ x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+ return x86_read_percpu(xen_vcpu)->arch.cr2;
+}
+
+static unsigned long xen_read_cr2_direct(void)
+{
+ return x86_read_percpu(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_write_cr4(unsigned long cr4)
+{
+ /* never allow TSC to be disabled */
+ native_write_cr4(cr4 & ~X86_CR4_TSD);
+}
+
+static unsigned long xen_read_cr3(void)
+{
+ return x86_read_percpu(xen_cr3);
+}
+
+static void xen_write_cr3(unsigned long cr3)
+{
+ BUG_ON(preemptible());
+
+ if (cr3 == x86_read_percpu(xen_cr3)) {
+ /* just a simple tlb flush */
+ xen_flush_tlb();
+ return;
+ }
+
+ x86_write_percpu(xen_cr3, cr3);
+
+
+ {
+ struct mmuext_op *op;
+ struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+ unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_NEW_BASEPTR;
+ op->arg1.mfn = mfn;
+
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+ }
+}
+
+/* Early in boot, while setting up the initial pagetable, assume
+ everything is pinned. */
+static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
+{
+ BUG_ON(mem_map); /* should only be used early */
+ make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+ attached to a pinned pagetable. */
+static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+
+ if (PagePinned(virt_to_page(mm->pgd))) {
+ SetPagePinned(page);
+
+ if (!PageHighMem(page))
+ make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+ else
+ /* make sure there are no stray mappings of
+ this page */
+ kmap_flush_unused();
+ }
+}
+
+/* This should never happen until we're OK to use struct page */
+static void xen_release_pt(u32 pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+
+ if (PagePinned(page)) {
+ if (!PageHighMem(page))
+ make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+ }
+}
+
+#ifdef CONFIG_HIGHPTE
+static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
+{
+ pgprot_t prot = PAGE_KERNEL;
+
+ if (PagePinned(page))
+ prot = PAGE_KERNEL_RO;
+
+ if (0 && PageHighMem(page))
+ printk("mapping highpte %lx type %d prot %s\n",
+ page_to_pfn(page), type,
+ (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
+
+ return kmap_atomic_prot(page, type, prot);
+}
+#endif
+
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+ /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+ if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+
+ return pte;
+}
+
+/* Init-time set_pte while constructing initial pagetables, which
+ doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+ pte = mask_rw_pte(ptep, pte);
+
+ xen_set_pte(ptep, pte);
+}
+
+static __init void xen_pagetable_setup_start(pgd_t *base)
+{
+ pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;
+
+ /* special set_pte for pagetable initialization */
+ paravirt_ops.set_pte = xen_set_pte_init;
+
+ init_mm.pgd = base;
+ /*
+ * copy top-level of Xen-supplied pagetable into place. For
+ * !PAE we can use this as-is, but for PAE it is a stand-in
+ * while we copy the pmd pages.
+ */
+ memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+
+ if (PTRS_PER_PMD > 1) {
+ int i;
+ /*
+ * For PAE, need to allocate new pmds, rather than
+ * share Xen's, since Xen doesn't like pmd's being
+ * shared between address spaces.
+ */
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+ if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
+ pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+
+ memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
+ PAGE_SIZE);
+
+ make_lowmem_page_readonly(pmd);
+
+ set_pgd(&base[i], __pgd(1 + __pa(pmd)));
+ } else
+ pgd_clear(&base[i]);
+ }
+ }
+
+ /* make sure zero_page is mapped RO so we can use it in pagetables */
+ make_lowmem_page_readonly(empty_zero_page);
+ make_lowmem_page_readonly(base);
+ /*
+ * Switch to new pagetable. This is done before
+ * pagetable_init has done anything so that the new pages
+ * added to the table can be prepared properly for Xen.
+ */
+ xen_write_cr3(__pa(base));
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+ /* This will work as long as patching hasn't happened yet
+ (which it hasn't) */
+ paravirt_ops.alloc_pt = xen_alloc_pt;
+ paravirt_ops.set_pte = xen_set_pte;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /*
+ * Create a mapping for the shared info page.
+ * Should be set_fixmap(), but shared_info is a machine
+ * address with no corresponding pseudo-phys address.
+ */
+ set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
+ PFN_DOWN(xen_start_info->shared_info),
+ PAGE_KERNEL);
+
+ HYPERVISOR_shared_info =
+ (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
+
+ } else
+ HYPERVISOR_shared_info =
+ (struct shared_info *)__va(xen_start_info->shared_info);
+
+ /* Actually pin the pagetable down, but we can't set PG_pinned
+ yet because the page structures don't exist yet. */
+ {
+ struct mmuext_op op;
+#ifdef CONFIG_X86_PAE
+ op.cmd = MMUEXT_PIN_L3_TABLE;
+#else
+ op.cmd = MMUEXT_PIN_L2_TABLE;
+#endif
+ op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base)));
+ if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+ BUG();
+ }
+}
+
+/* This is called once we have the cpu_possible_map */
+void __init xen_setup_vcpu_info_placement(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ xen_vcpu_setup(cpu);
+
+ /* xen_vcpu_setup managed to place the vcpu_info within the
+ percpu area for all cpus, so make use of it */
+ if (have_vcpu_info_placement) {
+ printk(KERN_INFO "Xen: using vcpu_info placement\n");
+
+ paravirt_ops.save_fl = xen_save_fl_direct;
+ paravirt_ops.restore_fl = xen_restore_fl_direct;
+ paravirt_ops.irq_disable = xen_irq_disable_direct;
+ paravirt_ops.irq_enable = xen_irq_enable_direct;
+ paravirt_ops.read_cr2 = xen_read_cr2_direct;
+ paravirt_ops.iret = xen_iret_direct;
+ }
+}
+
+static unsigned xen_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+ char *start, *end, *reloc;
+ unsigned ret;
+
+ start = end = reloc = NULL;
+
+#define SITE(x) \
+ case PARAVIRT_PATCH(x): \
+ if (have_vcpu_info_placement) { \
+ start = (char *)xen_##x##_direct; \
+ end = xen_##x##_direct_end; \
+ reloc = xen_##x##_direct_reloc; \
+ } \
+ goto patch_site
+
+ switch (type) {
+ SITE(irq_enable);
+ SITE(irq_disable);
+ SITE(save_fl);
+ SITE(restore_fl);
+#undef SITE
+
+ patch_site:
+ if (start == NULL || (end-start) > len)
+ goto default_patch;
+
+ ret = paravirt_patch_insns(insns, len, start, end);
+
+ /* Note: because reloc is assigned from something that
+ appears to be an array, gcc assumes it's non-null,
+ but doesn't know its relationship with start and
+ end. */
+ if (reloc > start && reloc < end) {
+ int reloc_off = reloc - start;
+ long *relocp = (long *)(insns + reloc_off);
+ long delta = start - (char *)insns;
+
+ *relocp += delta;
+ }
+ break;
+
+ default_patch:
+ default:
+ ret = paravirt_patch_default(type, clobbers, insns, len);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct paravirt_ops xen_paravirt_ops __initdata = {
+ .paravirt_enabled = 1,
+ .shared_kernel_pmd = 0,
+
+ .name = "Xen",
+ .banner = xen_banner,
+
+ .patch = xen_patch,
+
+ .memory_setup = xen_memory_setup,
+ .arch_setup = xen_arch_setup,
+ .init_IRQ = xen_init_IRQ,
+ .post_allocator_init = xen_mark_init_mm_pinned,
+
+ .time_init = xen_time_init,
+ .set_wallclock = xen_set_wallclock,
+ .get_wallclock = xen_get_wallclock,
+ .get_cpu_khz = xen_cpu_khz,
+ .sched_clock = xen_sched_clock,
+
+ .cpuid = xen_cpuid,
+
+ .set_debugreg = xen_set_debugreg,
+ .get_debugreg = xen_get_debugreg,
+
+ .clts = native_clts,
+
+ .read_cr0 = native_read_cr0,
+ .write_cr0 = native_write_cr0,
+
+ .read_cr2 = xen_read_cr2,
+ .write_cr2 = xen_write_cr2,
+
+ .read_cr3 = xen_read_cr3,
+ .write_cr3 = xen_write_cr3,
+
+ .read_cr4 = native_read_cr4,
+ .read_cr4_safe = native_read_cr4_safe,
+ .write_cr4 = xen_write_cr4,
+
+ .save_fl = xen_save_fl,
+ .restore_fl = xen_restore_fl,
+ .irq_disable = xen_irq_disable,
+ .irq_enable = xen_irq_enable,
+ .safe_halt = xen_safe_halt,
+ .halt = xen_halt,
+ .wbinvd = native_wbinvd,
+
+ .read_msr = native_read_msr_safe,
+ .write_msr = native_write_msr_safe,
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+
+ .iret = (void *)&hypercall_page[__HYPERVISOR_iret],
+ .irq_enable_sysexit = NULL, /* never called */
+
+ .load_tr_desc = paravirt_nop,
+ .set_ldt = xen_set_ldt,
+ .load_gdt = xen_load_gdt,
+ .load_idt = xen_load_idt,
+ .load_tls = xen_load_tls,
+
+ .store_gdt = native_store_gdt,
+ .store_idt = native_store_idt,
+ .store_tr = xen_store_tr,
+
+ .write_ldt_entry = xen_write_ldt_entry,
+ .write_gdt_entry = xen_write_gdt_entry,
+ .write_idt_entry = xen_write_idt_entry,
+ .load_esp0 = xen_load_esp0,
+
+ .set_iopl_mask = xen_set_iopl_mask,
+ .io_delay = xen_io_delay,
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ .apic_write = xen_apic_write,
+ .apic_write_atomic = xen_apic_write,
+ .apic_read = xen_apic_read,
+ .setup_boot_clock = paravirt_nop,
+ .setup_secondary_clock = paravirt_nop,
+ .startup_ipi_hook = paravirt_nop,
+#endif
+
+ .flush_tlb_user = xen_flush_tlb,
+ .flush_tlb_kernel = xen_flush_tlb,
+ .flush_tlb_single = xen_flush_tlb_single,
+ .flush_tlb_others = xen_flush_tlb_others,
+
+ .pte_update = paravirt_nop,
+ .pte_update_defer = paravirt_nop,
+
+ .pagetable_setup_start = xen_pagetable_setup_start,
+ .pagetable_setup_done = xen_pagetable_setup_done,
+
+ .alloc_pt = xen_alloc_pt_init,
+ .release_pt = xen_release_pt,
+ .alloc_pd = paravirt_nop,
+ .alloc_pd_clone = paravirt_nop,
+ .release_pd = paravirt_nop,
+
+#ifdef CONFIG_HIGHPTE
+ .kmap_atomic_pte = xen_kmap_atomic_pte,
+#endif
+
+ .set_pte = NULL, /* see xen_pagetable_setup_* */
+ .set_pte_at = xen_set_pte_at,
+ .set_pmd = xen_set_pmd,
+
+ .pte_val = xen_pte_val,
+ .pgd_val = xen_pgd_val,
+
+ .make_pte = xen_make_pte,
+ .make_pgd = xen_make_pgd,
+
+#ifdef CONFIG_X86_PAE
+ .set_pte_atomic = xen_set_pte_atomic,
+ .set_pte_present = xen_set_pte_at,
+ .set_pud = xen_set_pud,
+ .pte_clear = xen_pte_clear,
+ .pmd_clear = xen_pmd_clear,
+
+ .make_pmd = xen_make_pmd,
+ .pmd_val = xen_pmd_val,
+#endif /* PAE */
+
+ .activate_mm = xen_activate_mm,
+ .dup_mmap = xen_dup_mmap,
+ .exit_mmap = xen_exit_mmap,
+
+ .set_lazy_mode = xen_set_lazy_mode,
+};
+
+#ifdef CONFIG_SMP
+static const struct smp_ops xen_smp_ops __initdata = {
+ .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+ .smp_prepare_cpus = xen_smp_prepare_cpus,
+ .cpu_up = xen_cpu_up,
+ .smp_cpus_done = xen_smp_cpus_done,
+
+ .smp_send_stop = xen_smp_send_stop,
+ .smp_send_reschedule = xen_smp_send_reschedule,
+ .smp_call_function_mask = xen_smp_call_function_mask,
+};
+#endif /* CONFIG_SMP */
+
+static void xen_reboot(int reason)
+{
+#ifdef CONFIG_SMP
+ smp_send_stop();
+#endif
+
+ if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
+ BUG();
+}
+
+static void xen_restart(char *msg)
+{
+ xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_emergency_restart(void)
+{
+ xen_reboot(SHUTDOWN_reboot);
+}
+
+static void xen_machine_halt(void)
+{
+ xen_reboot(SHUTDOWN_poweroff);
+}
+
+static void xen_crash_shutdown(struct pt_regs *regs)
+{
+ xen_reboot(SHUTDOWN_crash);
+}
+
+static const struct machine_ops __initdata xen_machine_ops = {
+ .restart = xen_restart,
+ .halt = xen_machine_halt,
+ .power_off = xen_machine_halt,
+ .shutdown = xen_machine_halt,
+ .crash_shutdown = xen_crash_shutdown,
+ .emergency_restart = xen_emergency_restart,
+};
+
+
+/* First C function to be called on Xen boot */
+asmlinkage void __init xen_start_kernel(void)
+{
+ pgd_t *pgd;
+
+ if (!xen_start_info)
+ return;
+
+ BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);
+
+ /* Install Xen paravirt ops */
+ paravirt_ops = xen_paravirt_ops;
+ machine_ops = xen_machine_ops;
+
+#ifdef CONFIG_SMP
+ smp_ops = xen_smp_ops;
+#endif
+
+ xen_setup_features();
+
+ /* Get mfn list */
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;
+
+ pgd = (pgd_t *)xen_start_info->pt_base;
+
+ init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+
+ init_mm.pgd = pgd; /* use the Xen pagetables to start */
+
+ /* keep using Xen gdt for now; no urgent need to change it */
+
+ x86_write_percpu(xen_cr3, __pa(pgd));
+
+#ifdef CONFIG_SMP
+ /* Don't do the full vcpu_info placement stuff until we have a
+ possible map. */
+ per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+#else
+ /* May as well do it now, since there's no good time to call
+ it later on UP. */
+ xen_setup_vcpu_info_placement();
+#endif
+
+ paravirt_ops.kernel_rpl = 1;
+ if (xen_feature(XENFEAT_supervisor_mode_kernel))
+ paravirt_ops.kernel_rpl = 0;
+
+ /* set the limit of our address space */
+ reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);
+
+ /* set up basic CPUID stuff */
+ cpu_detect(&new_cpu_data);
+ new_cpu_data.hard_math = 1;
+ new_cpu_data.x86_capability[0] = cpuid_edx(1);
+
+ /* Poke various useful things into boot_params */
+ LOADER_TYPE = (9 << 4) | 0;
+ INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0;
+ INITRD_SIZE = xen_start_info->mod_len;
+
+ /* Start the world */
+ start_kernel();
+}
diff --git a/arch/i386/xen/events.c b/arch/i386/xen/events.c
new file mode 100644
index 00000000000..8904acc20f8
--- /dev/null
+++ b/arch/i386/xen/events.c
@@ -0,0 +1,590 @@
+/*
+ * Xen event channels
+ *
+ * Xen models interrupts with abstract event channels. Because each
+ * domain gets 1024 event channels, but NR_IRQ is not that large, we
+ * must dynamically map irqs<->event channels. The event channels
+ * interface with the rest of the kernel by defining a xen interrupt
+ * chip. When an event is received, it is mapped to an irq and sent
+ * through the normal interrupt processing path.
+ *
+ * There are four kinds of events which can be mapped to an event
+ * channel:
+ *
+ * 1. Inter-domain notifications. This includes all the virtual
+ * device events, since they're driven by front-ends in another domain
+ * (typically dom0).
+ * 2. VIRQs, typically used for timers. These are per-cpu events.
+ * 3. IPIs.
+ * 4. Hardware interrupts. Not supported at present.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/sync_bitops.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#include "xen-ops.h"
+
+/*
+ * This lock protects updates to the following mapping and reference-count
+ * arrays. The lock does not need to be acquired to read the mapping tables.
+ */
+static DEFINE_SPINLOCK(irq_mapping_update_lock);
+
+/* IRQ <-> VIRQ mapping. */
+static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+
+/* IRQ <-> IPI mapping */
+static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+
+/* Packed IRQ information: binding type, sub-type index, and event channel. */
+struct packed_irq
+{
+ unsigned short evtchn;
+ unsigned char index;
+ unsigned char type;
+};
+
+static struct packed_irq irq_info[NR_IRQS];
+
+/* Binding types. */
+enum {
+ IRQT_UNBOUND,
+ IRQT_PIRQ,
+ IRQT_VIRQ,
+ IRQT_IPI,
+ IRQT_EVTCHN
+};
+
+/* Convenient shorthand for packed representation of an unbound IRQ. */
+#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
+
+static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
+ [0 ... NR_EVENT_CHANNELS-1] = -1
+};
+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
+
+/* Reference counts for bindings to IRQs. */
+static int irq_bindcount[NR_IRQS];
+
+/* Xen will never allocate port zero for any purpose. */
+#define VALID_EVTCHN(chn) ((chn) != 0)
+
+/*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+void force_evtchn_callback(void)
+{
+ (void)HYPERVISOR_xen_version(0, NULL);
+}
+EXPORT_SYMBOL_GPL(force_evtchn_callback);
+
+static struct irq_chip xen_dynamic_chip;
+
+/* Constructor for packed IRQ information. */
+static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+{
+ return (struct packed_irq) { evtchn, index, type };
+}
+
+/*
+ * Accessors for packed IRQ information.
+ */
+static inline unsigned int evtchn_from_irq(int irq)
+{
+ return irq_info[irq].evtchn;
+}
+
+static inline unsigned int index_from_irq(int irq)
+{
+ return irq_info[irq].index;
+}
+
+static inline unsigned int type_from_irq(int irq)
+{
+ return irq_info[irq].type;
+}
+
+static inline unsigned long active_evtchns(unsigned int cpu,
+ struct shared_info *sh,
+ unsigned int idx)
+{
+ return (sh->evtchn_pending[idx] &
+ cpu_evtchn_mask[cpu][idx] &
+ ~sh->evtchn_mask[idx]);
+}
+
+static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+{
+ int irq = evtchn_to_irq[chn];
+
+ BUG_ON(irq == -1);
+#ifdef CONFIG_SMP
+ irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+#endif
+
+ __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
+ __set_bit(chn, cpu_evtchn_mask[cpu]);
+
+ cpu_evtchn[chn] = cpu;
+}
+
+static void init_evtchn_cpu_bindings(void)
+{
+#ifdef CONFIG_SMP
+ int i;
+ /* By default all event channels notify CPU#0. */
+ for (i = 0; i < NR_IRQS; i++)
+ irq_desc[i].affinity = cpumask_of_cpu(0);
+#endif
+
+ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
+ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+}
+
+static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+ return cpu_evtchn[evtchn];
+}
+
+static inline void clear_evtchn(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+static inline void set_evtchn(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_set_bit(port, &s->evtchn_pending[0]);
+}
+
+
+/**
+ * notify_remote_via_irq - send event to remote end of event channel via irq
+ * @irq: irq of event channel to send event to
+ *
+ * Unlike notify_remote_via_evtchn(), this is safe to use across
+ * save/restore. Notifications on a broken connection are silently
+ * dropped.
+ */
+void notify_remote_via_irq(int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ notify_remote_via_evtchn(evtchn);
+}
+EXPORT_SYMBOL_GPL(notify_remote_via_irq);
+
+static void mask_evtchn(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ sync_set_bit(port, &s->evtchn_mask[0]);
+}
+
+static void unmask_evtchn(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ unsigned int cpu = get_cpu();
+
+ BUG_ON(!irqs_disabled());
+
+ /* Slow path (hypercall) if this is a non-local port. */
+ if (unlikely(cpu != cpu_from_evtchn(port))) {
+ struct evtchn_unmask unmask = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
+ } else {
+ struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+
+ sync_clear_bit(port, &s->evtchn_mask[0]);
+
+ /*
+ * The following is basically the equivalent of
+ * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
+ * the interrupt edge' if the channel is masked.
+ */
+ if (sync_test_bit(port, &s->evtchn_pending[0]) &&
+ !sync_test_and_set_bit(port / BITS_PER_LONG,
+ &vcpu_info->evtchn_pending_sel))
+ vcpu_info->evtchn_upcall_pending = 1;
+ }
+
+ put_cpu();
+}
+
+static int find_unbound_irq(void)
+{
+ int irq;
+
+ /* Only allocate from dynirq range */
+ for (irq = 0; irq < NR_IRQS; irq++)
+ if (irq_bindcount[irq] == 0)
+ break;
+
+ if (irq == NR_IRQS)
+ panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+ return irq;
+}
+
+int bind_evtchn_to_irq(unsigned int evtchn)
+{
+ int irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ irq = evtchn_to_irq[evtchn];
+
+ if (irq == -1) {
+ irq = find_unbound_irq();
+
+ dynamic_irq_init(irq);
+ set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+ handle_level_irq, "event");
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+ }
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+
+static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
+{
+ struct evtchn_bind_ipi bind_ipi;
+ int evtchn, irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ irq = per_cpu(ipi_to_irq, cpu)[ipi];
+ if (irq == -1) {
+ irq = find_unbound_irq();
+ if (irq < 0)
+ goto out;
+
+ dynamic_irq_init(irq);
+ set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+ handle_level_irq, "ipi");
+
+ bind_ipi.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+ evtchn = bind_ipi.port;
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+
+ bind_evtchn_to_cpu(evtchn, cpu);
+ }
+
+ irq_bindcount[irq]++;
+
+ out:
+ spin_unlock(&irq_mapping_update_lock);
+ return irq;
+}
+
+
+static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
+{
+ struct evtchn_bind_virq bind_virq;
+ int evtchn, irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ irq = per_cpu(virq_to_irq, cpu)[virq];
+
+ if (irq == -1) {
+ bind_virq.virq = virq;
+ bind_virq.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
+
+ irq = find_unbound_irq();
+
+ dynamic_irq_init(irq);
+ set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
+ handle_level_irq, "virq");
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+ bind_evtchn_to_cpu(evtchn, cpu);
+ }
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+}
+
+static void unbind_from_irq(unsigned int irq)
+{
+ struct evtchn_close close;
+ int evtchn = evtchn_from_irq(irq);
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
+ close.port = evtchn;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+
+ switch (type_from_irq(irq)) {
+ case IRQT_VIRQ:
+ per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
+ [index_from_irq(irq)] = -1;
+ break;
+ default:
+ break;
+ }
+
+ /* Closed ports are implicitly re-bound to VCPU0. */
+ bind_evtchn_to_cpu(evtchn, 0);
+
+ evtchn_to_irq[evtchn] = -1;
+ irq_info[irq] = IRQ_UNBOUND;
+
+ dynamic_irq_init(irq);
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+}
+
+int bind_evtchn_to_irqhandler(unsigned int evtchn,
+ irqreturn_t (*handler)(int, void *),
+ unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ unsigned int irq;
+ int retval;
+
+ irq = bind_evtchn_to_irq(evtchn);
+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ if (retval != 0) {
+ unbind_from_irq(irq);
+ return retval;
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
+
+int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+ irqreturn_t (*handler)(int, void *),
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ unsigned int irq;
+ int retval;
+
+ irq = bind_virq_to_irq(virq, cpu);
+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ if (retval != 0) {
+ unbind_from_irq(irq);
+ return retval;
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
+
+int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+ unsigned int cpu,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id)
+{
+ int irq, retval;
+
+ irq = bind_ipi_to_irq(ipi, cpu);
+ if (irq < 0)
+ return irq;
+
+ retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ if (retval != 0) {
+ unbind_from_irq(irq);
+ return retval;
+ }
+
+ return irq;
+}
+
+void unbind_from_irqhandler(unsigned int irq, void *dev_id)
+{
+ free_irq(irq, dev_id);
+ unbind_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+
+void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
+{
+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
+ BUG_ON(irq < 0);
+ notify_remote_via_irq(irq);
+}
+
+
+/*
+ * Search the CPU's pending events bitmasks. For each one found, map
+ * the event number to an irq, and feed it into do_IRQ() for
+ * handling.
+ *
+ * Xen uses a two-level bitmap to speed searching. The first level is
+ * a bitset of words which contain pending event bits. The second
+ * level is a bitset of pending events themselves.
+ */
+fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
+{
+ int cpu = get_cpu();
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+ unsigned long pending_words;
+
+ vcpu_info->evtchn_upcall_pending = 0;
+
+ /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
+ pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+ while (pending_words != 0) {
+ unsigned long pending_bits;
+ int word_idx = __ffs(pending_words);
+ pending_words &= ~(1UL << word_idx);
+
+ while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
+ int bit_idx = __ffs(pending_bits);
+ int port = (word_idx * BITS_PER_LONG) + bit_idx;
+ int irq = evtchn_to_irq[port];
+
+ if (irq != -1) {
+ regs->orig_eax = ~irq;
+ do_IRQ(regs);
+ }
+ }
+ }
+
+ put_cpu();
+}
+
+/* Rebind an evtchn so that it gets delivered to a specific cpu */
+static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+{
+ struct evtchn_bind_vcpu bind_vcpu;
+ int evtchn = evtchn_from_irq(irq);
+
+ if (!VALID_EVTCHN(evtchn))
+ return;
+
+ /* Send future instances of this interrupt to the target vcpu. */
+ bind_vcpu.port = evtchn;
+ bind_vcpu.vcpu = tcpu;
+
+ /*
+ * If this fails, it usually just indicates that we're dealing with a
+ * virq or IPI channel, which don't actually need to be rebound. Ignore
+ * it, but don't do the xenlinux-level rebind in that case.
+ */
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ bind_evtchn_to_cpu(evtchn, tcpu);
+}
+
+
+static void set_affinity_irq(unsigned irq, cpumask_t dest)
+{
+ unsigned tcpu = first_cpu(dest);
+ rebind_irq_to_cpu(irq, tcpu);
+}
+
+static void enable_dynirq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ unmask_evtchn(evtchn);
+}
+
+static void disable_dynirq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ mask_evtchn(evtchn);
+}
+
+static void ack_dynirq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ move_native_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ clear_evtchn(evtchn);
+}
+
+static int retrigger_dynirq(unsigned int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+ int ret = 0;
+
+ if (VALID_EVTCHN(evtchn)) {
+ set_evtchn(evtchn);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static struct irq_chip xen_dynamic_chip __read_mostly = {
+ .name = "xen-dyn",
+ .mask = disable_dynirq,
+ .unmask = enable_dynirq,
+ .ack = ack_dynirq,
+ .set_affinity = set_affinity_irq,
+ .retrigger = retrigger_dynirq,
+};
+
+void __init xen_init_IRQ(void)
+{
+ int i;
+
+ init_evtchn_cpu_bindings();
+
+ /* No event channels are 'live' right now. */
+ for (i = 0; i < NR_EVENT_CHANNELS; i++)
+ mask_evtchn(i);
+
+ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+ for (i = 0; i < NR_IRQS; i++)
+ irq_bindcount[i] = 0;
+
+ irq_ctx_init(smp_processor_id());
+}
diff --git a/arch/i386/xen/features.c b/arch/i386/xen/features.c
new file mode 100644
index 00000000000..0707714e40d
--- /dev/null
+++ b/arch/i386/xen/features.c
@@ -0,0 +1,29 @@
+/******************************************************************************
+ * features.c
+ *
+ * Xen feature flags.
+ *
+ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
+ */
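+
+/*
+ * The xen_feature() test used throughout (e.g. in enlighten.c, mmu.c
+ * and setup.c) is simply an index into this bitmap, roughly:
+ *
+ *	xen_feature(flag) == xen_features[flag]
+ *
+ * The real inline lives in <xen/features.h>.
+ */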
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/features.h>
+
+u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
+EXPORT_SYMBOL_GPL(xen_features);
+
+void xen_setup_features(void)
+{
+ struct xen_feature_info fi;
+ int i, j;
+
+ for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
+ fi.submap_idx = i;
+ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
+ break;
+ for (j = 0; j < 32; j++)
+ xen_features[i * 32 + j] = !!(fi.submap & 1<<j);
+ }
+}
diff --git a/arch/i386/xen/manage.c b/arch/i386/xen/manage.c
new file mode 100644
index 00000000000..aa7af9e6abc
--- /dev/null
+++ b/arch/i386/xen/manage.c
@@ -0,0 +1,143 @@
+/*
+ * Handle external requests for shutdown, reboot and sysrq
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/reboot.h>
+#include <linux/sysrq.h>
+
+#include <xen/xenbus.h>
+
+#define SHUTDOWN_INVALID -1
+#define SHUTDOWN_POWEROFF 0
+#define SHUTDOWN_SUSPEND 2
+/* Code 3 is SHUTDOWN_CRASH, which we don't use because the domain can only
+ * report a crash, not be instructed to crash!
+ * HALT is the same as POWEROFF, as far as we're concerned. The tools use
+ * the distinction when we return the reason code to them.
+ */
+#define SHUTDOWN_HALT 4
+
+/* Ignore multiple shutdown requests. */
+static int shutting_down = SHUTDOWN_INVALID;
+
+static void shutdown_handler(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ char *str;
+ struct xenbus_transaction xbt;
+ int err;
+
+ if (shutting_down != SHUTDOWN_INVALID)
+ return;
+
+ again:
+ err = xenbus_transaction_start(&xbt);
+ if (err)
+ return;
+
+ str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
+ /* Ignore read errors and empty reads. */
+ if (XENBUS_IS_ERR_READ(str)) {
+ xenbus_transaction_end(xbt, 1);
+ return;
+ }
+
+ xenbus_write(xbt, "control", "shutdown", "");
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err == -EAGAIN) {
+ kfree(str);
+ goto again;
+ }
+
+ if (strcmp(str, "poweroff") == 0 ||
+ strcmp(str, "halt") == 0)
+ orderly_poweroff(false);
+ else if (strcmp(str, "reboot") == 0)
+ ctrl_alt_del();
+ else {
+ printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
+ shutting_down = SHUTDOWN_INVALID;
+ }
+
+ kfree(str);
+}
+
+static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
+ unsigned int len)
+{
+ char sysrq_key = '\0';
+ struct xenbus_transaction xbt;
+ int err;
+
+ again:
+ err = xenbus_transaction_start(&xbt);
+ if (err)
+ return;
+ if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+ printk(KERN_ERR "Unable to read sysrq code in "
+ "control/sysrq\n");
+ xenbus_transaction_end(xbt, 1);
+ return;
+ }
+
+ if (sysrq_key != '\0')
+ xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err == -EAGAIN)
+ goto again;
+
+ if (sysrq_key != '\0')
+ handle_sysrq(sysrq_key, NULL);
+}
+
+static struct xenbus_watch shutdown_watch = {
+ .node = "control/shutdown",
+ .callback = shutdown_handler
+};
+
+static struct xenbus_watch sysrq_watch = {
+ .node = "control/sysrq",
+ .callback = sysrq_handler
+};
+
+static int setup_shutdown_watcher(void)
+{
+ int err;
+
+ err = register_xenbus_watch(&shutdown_watch);
+ if (err) {
+ printk(KERN_ERR "Failed to set shutdown watcher\n");
+ return err;
+ }
+
+ err = register_xenbus_watch(&sysrq_watch);
+ if (err) {
+ printk(KERN_ERR "Failed to set sysrq watcher\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int shutdown_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ setup_shutdown_watcher();
+ return NOTIFY_DONE;
+}
+
+static int __init setup_shutdown_event(void)
+{
+ static struct notifier_block xenstore_notifier = {
+ .notifier_call = shutdown_event
+ };
+ register_xenstore_notifier(&xenstore_notifier);
+
+ return 0;
+}
+
+subsys_initcall(setup_shutdown_event);
diff --git a/arch/i386/xen/mmu.c b/arch/i386/xen/mmu.c
new file mode 100644
index 00000000000..4ae038aa6c2
--- /dev/null
+++ b/arch/i386/xen/mmu.c
@@ -0,0 +1,564 @@
+/*
+ * Xen mmu operations
+ *
+ * This file contains the various mmu fetch and update operations.
+ * The most important job they must perform is the mapping between the
+ * domain's pfn and the overall machine mfns.
+ *
+ * Xen allows guests to directly update the pagetable, in a controlled
+ * fashion. In other words, the guest modifies the same pagetable
+ * that the CPU actually uses, which eliminates the overhead of having
+ * a separate shadow pagetable.
+ *
+ * In order to allow this, it falls on the guest domain to map its
+ * notion of a "physical" pfn - which is just a domain-local linear
+ * address - into a real "machine address" which the CPU's MMU can
+ * use.
+ *
+ * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
+ * inserted directly into the pagetable. When creating a new
+ * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
+ * when reading the content back with __(pgd|pmd|pte)_val, it converts
+ * the mfn back into a pfn.
+ *
+ * The other constraint is that all pages which make up a pagetable
+ * must be mapped read-only in the guest. This prevents uncontrolled
+ * guest updates to the pagetable. Xen strictly enforces this, and
+ * will disallow any pagetable update which will end up mapping a
+ * pagetable page RW, and will disallow using any writable page as a
+ * pagetable.
+ *
+ * Naively, when loading %cr3 with the base of a new pagetable, Xen
+ * would need to validate the whole pagetable before going on.
+ * Naturally, this is quite slow. The solution is to "pin" a
+ * pagetable, which enforces all the constraints on the pagetable even
+ * when it is not actively in use. This means that Xen can be assured
+ * that it is still valid when you do load it into %cr3, and doesn't
+ * need to revalidate it.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
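+
+/*
+ * Roughly speaking, the pfn<->mfn conversion is just a lookup in the
+ * translation table Xen gives us at start of day (phys_to_machine_mapping,
+ * set up in setup.c); the helpers used below, such as pfn_to_mfn() and
+ * phys_to_machine()/machine_to_phys(), live in <xen/page.h> and boil
+ * down to something like:
+ *
+ *	mfn = phys_to_machine_mapping[pfn];
+ */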
+#include <linux/sched.h>
+#include <linux/highmem.h>
+#include <linux/bug.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/paravirt.h>
+
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#include <xen/page.h>
+#include <xen/interface/xen.h>
+
+#include "multicalls.h"
+#include "mmu.h"
+
+xmaddr_t arbitrary_virt_to_machine(unsigned long address)
+{
+ pte_t *pte = lookup_address(address);
+ unsigned offset = address & ~PAGE_MASK;
+
+ BUG_ON(pte == NULL);
+
+ return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
+}
+
+void make_lowmem_page_readonly(void *vaddr)
+{
+ pte_t *pte, ptev;
+ unsigned long address = (unsigned long)vaddr;
+
+ pte = lookup_address(address);
+ BUG_ON(pte == NULL);
+
+ ptev = pte_wrprotect(*pte);
+
+ if (HYPERVISOR_update_va_mapping(address, ptev, 0))
+ BUG();
+}
+
+void make_lowmem_page_readwrite(void *vaddr)
+{
+ pte_t *pte, ptev;
+ unsigned long address = (unsigned long)vaddr;
+
+ pte = lookup_address(address);
+ BUG_ON(pte == NULL);
+
+ ptev = pte_mkwrite(*pte);
+
+ if (HYPERVISOR_update_va_mapping(address, ptev, 0))
+ BUG();
+}
+
+
+void xen_set_pmd(pmd_t *ptr, pmd_t val)
+{
+ struct multicall_space mcs;
+ struct mmu_update *u;
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+ u->ptr = virt_to_machine(ptr).maddr;
+ u->val = pmd_val_ma(val);
+ MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+/*
+ * Associate a virtual page frame with a given physical page frame
+ * and protection flags for that frame.
+ */
+void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = swapper_pg_dir + pgd_index(vaddr);
+ if (pgd_none(*pgd)) {
+ BUG();
+ return;
+ }
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
+ BUG();
+ return;
+ }
+ pmd = pmd_offset(pud, vaddr);
+ if (pmd_none(*pmd)) {
+ BUG();
+ return;
+ }
+ pte = pte_offset_kernel(pmd, vaddr);
+ /* <mfn,flags> stored as-is, to permit clearing entries */
+ xen_set_pte(pte, mfn_pte(mfn, flags));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
+}
+
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ if (mm == current->mm || mm == &init_mm) {
+ if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ struct multicall_space mcs;
+ mcs = xen_mc_entry(0);
+
+ MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+ return;
+ } else
+ if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
+ return;
+ }
+ xen_set_pte(ptep, pteval);
+}
+
+#ifdef CONFIG_X86_PAE
+void xen_set_pud(pud_t *ptr, pud_t val)
+{
+ struct multicall_space mcs;
+ struct mmu_update *u;
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*u));
+ u = mcs.args;
+ u->ptr = virt_to_machine(ptr).maddr;
+ u->val = pud_val_ma(val);
+ MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
+
+void xen_set_pte(pte_t *ptep, pte_t pte)
+{
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
+void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ set_64bit((u64 *)ptep, pte_val_ma(pte));
+}
+
+void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ ptep->pte_low = 0;
+ smp_wmb(); /* make sure low gets written first */
+ ptep->pte_high = 0;
+}
+
+void xen_pmd_clear(pmd_t *pmdp)
+{
+ xen_set_pmd(pmdp, __pmd(0));
+}
+
+unsigned long long xen_pte_val(pte_t pte)
+{
+ unsigned long long ret = 0;
+
+ if (pte.pte_low) {
+ ret = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
+ ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+ }
+
+ return ret;
+}
+
+unsigned long long xen_pmd_val(pmd_t pmd)
+{
+ unsigned long long ret = pmd.pmd;
+ if (ret)
+ ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+ return ret;
+}
+
+unsigned long long xen_pgd_val(pgd_t pgd)
+{
+ unsigned long long ret = pgd.pgd;
+ if (ret)
+ ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+ return ret;
+}
+
+pte_t xen_make_pte(unsigned long long pte)
+{
+ if (pte & 1)
+ pte = phys_to_machine(XPADDR(pte)).maddr;
+
+ return (pte_t){ pte, pte >> 32 };
+}
+
+pmd_t xen_make_pmd(unsigned long long pmd)
+{
+ if (pmd & 1)
+ pmd = phys_to_machine(XPADDR(pmd)).maddr;
+
+ return (pmd_t){ pmd };
+}
+
+pgd_t xen_make_pgd(unsigned long long pgd)
+{
+ if (pgd & _PAGE_PRESENT)
+ pgd = phys_to_machine(XPADDR(pgd)).maddr;
+
+ return (pgd_t){ pgd };
+}
+#else /* !PAE */
+void xen_set_pte(pte_t *ptep, pte_t pte)
+{
+ *ptep = pte;
+}
+
+unsigned long xen_pte_val(pte_t pte)
+{
+ unsigned long ret = pte.pte_low;
+
+ if (ret & _PAGE_PRESENT)
+ ret = machine_to_phys(XMADDR(ret)).paddr;
+
+ return ret;
+}
+
+unsigned long xen_pgd_val(pgd_t pgd)
+{
+ unsigned long ret = pgd.pgd;
+ if (ret)
+ ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+ return ret;
+}
+
+pte_t xen_make_pte(unsigned long pte)
+{
+ if (pte & _PAGE_PRESENT)
+ pte = phys_to_machine(XPADDR(pte)).maddr;
+
+ return (pte_t){ pte };
+}
+
+pgd_t xen_make_pgd(unsigned long pgd)
+{
+ if (pgd & _PAGE_PRESENT)
+ pgd = phys_to_machine(XPADDR(pgd)).maddr;
+
+ return (pgd_t){ pgd };
+}
+#endif /* CONFIG_X86_PAE */
+
+
+
+/*
+ (Yet another) pagetable walker. This one is intended for pinning a
+ pagetable. It walks a pagetable and calls the callback function on
+ each page it finds making up the page table, at every level. It
+ walks the entire pagetable, but it only bothers pinning pte pages
+ which are below the limit. In the normal case this will be
+ TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP. The
+ important bit is that we don't pin beyond there, because then we
+ start getting into Xen's ptes.
+*/
+static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
+ unsigned long limit)
+{
+ pgd_t *pgd = pgd_base;
+ int flush = 0;
+ unsigned long addr = 0;
+ unsigned long pgd_next;
+
+ BUG_ON(limit > FIXADDR_TOP);
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
+ for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
+ pud_t *pud;
+ unsigned long pud_limit, pud_next;
+
+ pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);
+
+ if (!pgd_val(*pgd))
+ continue;
+
+ pud = pud_offset(pgd, 0);
+
+ if (PTRS_PER_PUD > 1) /* not folded */
+ flush |= (*func)(virt_to_page(pud), 0);
+
+ for (; addr != pud_limit; pud++, addr = pud_next) {
+ pmd_t *pmd;
+ unsigned long pmd_limit;
+
+ pud_next = pud_addr_end(addr, pud_limit);
+
+ if (pud_next < limit)
+ pmd_limit = pud_next;
+ else
+ pmd_limit = limit;
+
+ if (pud_none(*pud))
+ continue;
+
+ pmd = pmd_offset(pud, 0);
+
+ if (PTRS_PER_PMD > 1) /* not folded */
+ flush |= (*func)(virt_to_page(pmd), 0);
+
+ for (; addr != pmd_limit; pmd++) {
+ addr += (PAGE_SIZE * PTRS_PER_PTE);
+ if ((pmd_limit-1) < (addr-1)) {
+ addr = pmd_limit;
+ break;
+ }
+
+ if (pmd_none(*pmd))
+ continue;
+
+ flush |= (*func)(pmd_page(*pmd), 0);
+ }
+ }
+ }
+
+ flush |= (*func)(virt_to_page(pgd_base), UVMF_TLB_FLUSH);
+
+ return flush;
+}
+
+static int pin_page(struct page *page, unsigned flags)
+{
+ unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
+ int flush;
+
+ if (pgfl)
+ flush = 0; /* already pinned */
+ else if (PageHighMem(page))
+ /* kmaps need flushing if we found an unpinned
+ highpage */
+ flush = 1;
+ else {
+ void *pt = lowmem_page_address(page);
+ unsigned long pfn = page_to_pfn(page);
+ struct multicall_space mcs = __xen_mc_entry(0);
+
+ flush = 0;
+
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
+ pfn_pte(pfn, PAGE_KERNEL_RO),
+ flags);
+ }
+
+ return flush;
+}
+
+/* This is called just after a mm has been created, but it has not
+ been used yet. We need to make sure that its pagetable is all
+ read-only, and can be pinned. */
+void xen_pgd_pin(pgd_t *pgd)
+{
+ struct multicall_space mcs;
+ struct mmuext_op *op;
+
+ xen_mc_batch();
+
+ if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
+ /* re-enable interrupts for kmap_flush_unused */
+ xen_mc_issue(0);
+ kmap_flush_unused();
+ xen_mc_batch();
+ }
+
+ mcs = __xen_mc_entry(sizeof(*op));
+ op = mcs.args;
+
+#ifdef CONFIG_X86_PAE
+ op->cmd = MMUEXT_PIN_L3_TABLE;
+#else
+ op->cmd = MMUEXT_PIN_L2_TABLE;
+#endif
+ op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(0);
+}
+
+/* The init_mm pagetable is really pinned as soon as it's created, but
+ that's before we have page structures to store the bits. So do all
+ the book-keeping now. */
+static __init int mark_pinned(struct page *page, unsigned flags)
+{
+ SetPagePinned(page);
+ return 0;
+}
+
+void __init xen_mark_init_mm_pinned(void)
+{
+ pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
+}
+
+static int unpin_page(struct page *page, unsigned flags)
+{
+ unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
+
+ if (pgfl && !PageHighMem(page)) {
+ void *pt = lowmem_page_address(page);
+ unsigned long pfn = page_to_pfn(page);
+ struct multicall_space mcs = __xen_mc_entry(0);
+
+ MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
+ pfn_pte(pfn, PAGE_KERNEL),
+ flags);
+ }
+
+ return 0; /* never need to flush on unpin */
+}
+
+/* Release a pagetable's pages back as normal RW */
+static void xen_pgd_unpin(pgd_t *pgd)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ xen_mc_batch();
+
+ mcs = __xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_UNPIN_TABLE;
+ op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
+
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ pgd_walk(pgd, unpin_page, TASK_SIZE);
+
+ xen_mc_issue(0);
+}
+
+void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ spin_lock(&next->page_table_lock);
+ xen_pgd_pin(next->pgd);
+ spin_unlock(&next->page_table_lock);
+}
+
+void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ spin_lock(&mm->page_table_lock);
+ xen_pgd_pin(mm->pgd);
+ spin_unlock(&mm->page_table_lock);
+}
+
+
+#ifdef CONFIG_SMP
+/* Another CPU may still have its %cr3 pointing at the pagetable, so
+ we need to repoint it somewhere else before we can unpin it. */
+static void drop_other_mm_ref(void *info)
+{
+ struct mm_struct *mm = info;
+
+ if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
+ leave_mm(smp_processor_id());
+}
+
+static void drop_mm_ref(struct mm_struct *mm)
+{
+ if (current->active_mm == mm) {
+ if (current->mm == mm)
+ load_cr3(swapper_pg_dir);
+ else
+ leave_mm(smp_processor_id());
+ }
+
+ if (!cpus_empty(mm->cpu_vm_mask))
+ xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref,
+ mm, 1);
+}
+#else
+static void drop_mm_ref(struct mm_struct *mm)
+{
+ if (current->active_mm == mm)
+ load_cr3(swapper_pg_dir);
+}
+#endif
+
+/*
+ * While a process runs, Xen pins its pagetable, which means that the
+ * hypervisor forces it to be read-only and controls all updates to
+ * it. This means that all pagetable updates have to go via the
+ * hypervisor, which is moderately expensive.
+ *
+ * Since we're pulling the pagetable down, we switch to using init_mm,
+ * unpin the old process's pagetable and mark it all read-write, which
+ * allows further operations on it to be simple memory accesses.
+ *
+ * The only subtle point is that another CPU may still be using the
+ * pagetable because of lazy tlb flushing. This means we need to
+ * switch all CPUs off this pagetable before we can unpin it.
+ */
+void xen_exit_mmap(struct mm_struct *mm)
+{
+ get_cpu(); /* make sure we don't move around */
+ drop_mm_ref(mm);
+ put_cpu();
+
+ spin_lock(&mm->page_table_lock);
+ xen_pgd_unpin(mm->pgd);
+ spin_unlock(&mm->page_table_lock);
+}
diff --git a/arch/i386/xen/mmu.h b/arch/i386/xen/mmu.h
new file mode 100644
index 00000000000..c9ff27f3ac3
--- /dev/null
+++ b/arch/i386/xen/mmu.h
@@ -0,0 +1,60 @@
+#ifndef _XEN_MMU_H
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ *
+ * Note that Xen is using the fact that the pagetable base is always
+ * page-aligned, and putting the 12 MSB of the address into the 12 LSB
+ * of cr3.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
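+
+/* For example, an mfn of 0x123456 packs into cr3 as
+   (0x123456 << 12) | (0x123456 >> 20) == 0x23456001. */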
+
+
+void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags);
+
+void xen_set_pte(pte_t *ptep, pte_t pteval);
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
+
+void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
+void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+void xen_exit_mmap(struct mm_struct *mm);
+
+void xen_pgd_pin(pgd_t *pgd);
+//void xen_pgd_unpin(pgd_t *pgd);
+
+#ifdef CONFIG_X86_PAE
+unsigned long long xen_pte_val(pte_t);
+unsigned long long xen_pmd_val(pmd_t);
+unsigned long long xen_pgd_val(pgd_t);
+
+pte_t xen_make_pte(unsigned long long);
+pmd_t xen_make_pmd(unsigned long long);
+pgd_t xen_make_pgd(unsigned long long);
+
+void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
+void xen_set_pud(pud_t *ptr, pud_t val);
+void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void xen_pmd_clear(pmd_t *pmdp);
+
+
+#else
+unsigned long xen_pte_val(pte_t);
+unsigned long xen_pmd_val(pmd_t);
+unsigned long xen_pgd_val(pgd_t);
+
+pte_t xen_make_pte(unsigned long);
+pmd_t xen_make_pmd(unsigned long);
+pgd_t xen_make_pgd(unsigned long);
+#endif
+
+#endif /* _XEN_MMU_H */
diff --git a/arch/i386/xen/multicalls.c b/arch/i386/xen/multicalls.c
new file mode 100644
index 00000000000..c837e8e463d
--- /dev/null
+++ b/arch/i386/xen/multicalls.c
@@ -0,0 +1,90 @@
+/*
+ * Xen hypercall batching.
+ *
+ * Xen allows multiple hypercalls to be issued at once, using the
+ * multicall interface. This allows the cost of trapping into the
+ * hypervisor to be amortized over several calls.
+ *
+ * This file implements a simple interface for multicalls. There's a
+ * per-cpu buffer of outstanding multicalls. When you want to queue a
+ * multicall for issuing, you can allocate a multicall slot for the
+ * call and its arguments, along with scratch space for any data the
+ * arguments point to (for passing pointers to structures, etc).
+ * When the multicall is actually issued, all the space for the
+ * commands and allocated memory is freed for reuse.
+ *
+ * Multicalls are flushed whenever any of the buffers get full, or
+ * when explicitly requested. There's no way to get per-multicall
+ * return results back. It will BUG if any of the multicalls fail.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
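+
+/*
+ * The usual calling pattern (see, for example, xen_set_pmd() in mmu.c)
+ * looks like:
+ *
+ *	mcs = xen_mc_entry(sizeof(*u));
+ *	u = mcs.args;
+ *	... fill in *u and point mcs.mc at the right hypercall ...
+ *	xen_mc_issue(PARAVIRT_LAZY_MMU);
+ *
+ * which either leaves the call queued (if we're in the matching lazy
+ * mode) or flushes the whole batch immediately.
+ */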
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+#include <asm/xen/hypercall.h>
+
+#include "multicalls.h"
+
+#define MC_BATCH 32
+#define MC_ARGS (MC_BATCH * 16 / sizeof(u64))
+
+struct mc_buffer {
+ struct multicall_entry entries[MC_BATCH];
+ u64 args[MC_ARGS];
+ unsigned mcidx, argidx;
+};
+
+static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
+DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
+
+void xen_mc_flush(void)
+{
+ struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ int ret = 0;
+ unsigned long flags;
+
+ BUG_ON(preemptible());
+
+ /* Disable interrupts in case someone comes in and queues
+ something in the middle */
+ local_irq_save(flags);
+
+ if (b->mcidx) {
+ int i;
+
+ if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
+ BUG();
+ for (i = 0; i < b->mcidx; i++)
+ if (b->entries[i].result < 0)
+ ret++;
+ b->mcidx = 0;
+ b->argidx = 0;
+ } else
+ BUG_ON(b->argidx != 0);
+
+ local_irq_restore(flags);
+
+ BUG_ON(ret);
+}
+
+struct multicall_space __xen_mc_entry(size_t args)
+{
+ struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct multicall_space ret;
+ unsigned argspace = (args + sizeof(u64) - 1) / sizeof(u64);
+
+ BUG_ON(preemptible());
+ BUG_ON(argspace > MC_ARGS);
+
+ if (b->mcidx == MC_BATCH ||
+ (b->argidx + argspace) > MC_ARGS)
+ xen_mc_flush();
+
+ ret.mc = &b->entries[b->mcidx];
+ b->mcidx++;
+ ret.args = &b->args[b->argidx];
+ b->argidx += argspace;
+
+ return ret;
+}
diff --git a/arch/i386/xen/multicalls.h b/arch/i386/xen/multicalls.h
new file mode 100644
index 00000000000..e6f7530b156
--- /dev/null
+++ b/arch/i386/xen/multicalls.h
@@ -0,0 +1,45 @@
+#ifndef _XEN_MULTICALLS_H
+#define _XEN_MULTICALLS_H
+
+#include "xen-ops.h"
+
+/* Multicalls */
+struct multicall_space
+{
+ struct multicall_entry *mc;
+ void *args;
+};
+
+/* Allocate room for a multicall and its args */
+struct multicall_space __xen_mc_entry(size_t args);
+
+DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
+
+/* Call to start a batch of multiple __xen_mc_entry()s. Must be
+ paired with xen_mc_issue() */
+static inline void xen_mc_batch(void)
+{
+ /* need to disable interrupts until this entry is complete */
+ local_irq_save(__get_cpu_var(xen_mc_irq_flags));
+}
+
+static inline struct multicall_space xen_mc_entry(size_t args)
+{
+ xen_mc_batch();
+ return __xen_mc_entry(args);
+}
+
+/* Flush all pending multicalls */
+void xen_mc_flush(void);
+
+/* Issue a multicall if we're not in a lazy mode */
+static inline void xen_mc_issue(unsigned mode)
+{
+ if ((xen_get_lazy_mode() & mode) == 0)
+ xen_mc_flush();
+
+ /* restore flags saved in xen_mc_batch */
+ local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+}
+
+#endif /* _XEN_MULTICALLS_H */
diff --git a/arch/i386/xen/setup.c b/arch/i386/xen/setup.c
new file mode 100644
index 00000000000..2fe6eac510f
--- /dev/null
+++ b/arch/i386/xen/setup.c
@@ -0,0 +1,96 @@
+/*
+ * Machine specific setup for xen
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+
+#include <asm/elf.h>
+#include <asm/e820.h>
+#include <asm/setup.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/physdev.h>
+#include <xen/features.h>
+
+#include "xen-ops.h"
+
+/* These are code, but not functions. Defined in entry.S */
+extern const char xen_hypervisor_callback[];
+extern const char xen_failsafe_callback[];
+
+unsigned long *phys_to_machine_mapping;
+EXPORT_SYMBOL(phys_to_machine_mapping);
+
+/**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ **/
+
+char * __init xen_memory_setup(void)
+{
+ unsigned long max_pfn = xen_start_info->nr_pages;
+
+ e820.nr_map = 0;
+ add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
+
+ return "Xen";
+}
+
+static void xen_idle(void)
+{
+ local_irq_disable();
+
+ if (need_resched())
+ local_irq_enable();
+ else {
+ current_thread_info()->status &= ~TS_POLLING;
+ smp_mb__after_clear_bit();
+ safe_halt();
+ current_thread_info()->status |= TS_POLLING;
+ }
+}
+
+void __init xen_arch_setup(void)
+{
+ struct physdev_set_iopl set_iopl;
+ int rc;
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
+
+ HYPERVISOR_set_callbacks(__KERNEL_CS, (unsigned long)xen_hypervisor_callback,
+ __KERNEL_CS, (unsigned long)xen_failsafe_callback);
+
+ set_iopl.iopl = 1;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+ if (rc != 0)
+ printk(KERN_INFO "physdev_op failed %d\n", rc);
+
+#ifdef CONFIG_ACPI
+ if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
+ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
+ disable_acpi();
+ }
+#endif
+
+ memcpy(boot_command_line, xen_start_info->cmd_line,
+ MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
+ COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
+
+ pm_idle = xen_idle;
+
+#ifdef CONFIG_SMP
+ /* fill cpus_possible with all available cpus */
+ xen_fill_possible_map();
+#endif
+
+ paravirt_disable_iospace();
+}
diff --git a/arch/i386/xen/smp.c b/arch/i386/xen/smp.c
new file mode 100644
index 00000000000..557b8e24706
--- /dev/null
+++ b/arch/i386/xen/smp.c
@@ -0,0 +1,404 @@
+/*
+ * Xen SMP support
+ *
+ * This file implements the Xen versions of smp_ops. SMP under Xen is
+ * very straightforward. Bringing a CPU up is simply a matter of
+ * loading its initial context and setting it running.
+ *
+ * IPIs are handled through the Xen event mechanism.
+ *
+ * Because virtual CPUs can be scheduled onto any real CPU, there's no
+ * useful topology information for the kernel to use. As a
+ * result, all CPUs are treated as if they're single-core and
+ * single-threaded.
+ *
+ * This does not handle HOTPLUG_CPU yet.
+ */
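+
+/*
+ * Concretely, xen_cpu_up() builds a vcpu_guest_context for the new cpu
+ * in cpu_initialize_context(), registers it with VCPUOP_initialise and
+ * then kicks the vcpu with VCPUOP_up; the new vcpu starts life in
+ * cpu_bringup_and_idle() below.
+ */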
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/smp.h>
+
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/cpu.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include <asm/xen/interface.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/page.h>
+#include <xen/events.h>
+
+#include "xen-ops.h"
+#include "mmu.h"
+
+static cpumask_t cpu_initialized_map;
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+
+/*
+ * Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+static DEFINE_SPINLOCK(call_lock);
+
+struct call_data_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t started;
+ atomic_t finished;
+ int wait;
+};
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
+
+static struct call_data_struct *call_data;
+
+/*
+ * Reschedule call back. Nothing to do,
+ * all the work is done automatically when
+ * we return from the interrupt.
+ */
+static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static __cpuinit void cpu_bringup_and_idle(void)
+{
+ int cpu = smp_processor_id();
+
+ cpu_init();
+
+ preempt_disable();
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+
+ xen_setup_cpu_clockevents();
+
+ /* We can take interrupts now: we're officially "up". */
+ local_irq_enable();
+
+ wmb(); /* make sure everything is out */
+ cpu_idle();
+}
+
+static int xen_smp_intr_init(unsigned int cpu)
+{
+ int rc;
+ const char *resched_name, *callfunc_name;
+
+ per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
+
+ resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
+ cpu,
+ xen_reschedule_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ resched_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(resched_irq, cpu) = rc;
+
+ callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
+ cpu,
+ xen_call_function_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ callfunc_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(callfunc_irq, cpu) = rc;
+
+ return 0;
+
+ fail:
+ if (per_cpu(resched_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
+ if (per_cpu(callfunc_irq, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+ return rc;
+}
+
+void __init xen_fill_possible_map(void)
+{
+ int i, rc;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
+ if (rc >= 0)
+ cpu_set(i, cpu_possible_map);
+ }
+}
+
+void __init xen_smp_prepare_boot_cpu(void)
+{
+ int cpu;
+
+ BUG_ON(smp_processor_id() != 0);
+ native_smp_prepare_boot_cpu();
+
+ /* We've switched to the "real" per-cpu gdt, so make sure the
+ old memory can be recycled */
+ make_lowmem_page_readwrite(&per_cpu__gdt_page);
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
+ }
+
+ xen_setup_vcpu_info_placement();
+}
+
+void __init xen_smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
+ }
+
+ smp_store_cpu_info(0);
+ set_cpu_sibling_map(0);
+
+ if (xen_smp_intr_init(0))
+ BUG();
+
+ cpu_initialized_map = cpumask_of_cpu(0);
+
+ /* Restrict the possible_map according to max_cpus. */
+ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
+ for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
+ continue;
+ cpu_clear(cpu, cpu_possible_map);
+ }
+
+ for_each_possible_cpu (cpu) {
+ struct task_struct *idle;
+
+ if (cpu == 0)
+ continue;
+
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle))
+ panic("failed fork for CPU %d", cpu);
+
+ cpu_set(cpu, cpu_present_map);
+ }
+
+ //init_xenbus_allowed_cpumask();
+}
+
+static __cpuinit int
+cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
+{
+ struct vcpu_guest_context *ctxt;
+ struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
+
+ if (cpu_test_and_set(cpu, cpu_initialized_map))
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (ctxt == NULL)
+ return -ENOMEM;
+
+ ctxt->flags = VGCF_IN_KERNEL;
+ ctxt->user_regs.ds = __USER_DS;
+ ctxt->user_regs.es = __USER_DS;
+ ctxt->user_regs.fs = __KERNEL_PERCPU;
+ ctxt->user_regs.gs = 0;
+ ctxt->user_regs.ss = __KERNEL_DS;
+ ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+ ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
+
+ memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
+
+ xen_copy_trap_info(ctxt->trap_ctxt);
+
+ ctxt->ldt_ents = 0;
+
+ BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
+ make_lowmem_page_readonly(gdt->gdt);
+
+ ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
+ ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);
+
+ ctxt->user_regs.cs = __KERNEL_CS;
+ ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+
+ ctxt->kernel_ss = __KERNEL_DS;
+ ctxt->kernel_sp = idle->thread.esp0;
+
+ ctxt->event_callback_cs = __KERNEL_CS;
+ ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
+ ctxt->failsafe_callback_cs = __KERNEL_CS;
+ ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;
+
+ per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+ ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+
+ if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
+ BUG();
+
+ kfree(ctxt);
+ return 0;
+}
+
+int __cpuinit xen_cpu_up(unsigned int cpu)
+{
+ struct task_struct *idle = idle_task(cpu);
+ int rc;
+
+#if 0
+ rc = cpu_up_check(cpu);
+ if (rc)
+ return rc;
+#endif
+
+ init_gdt(cpu);
+ per_cpu(current_task, cpu) = idle;
+ irq_ctx_init(cpu);
+ xen_setup_timer(cpu);
+
+ /* make sure interrupts start blocked */
+ per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
+
+ rc = cpu_initialize_context(cpu, idle);
+ if (rc)
+ return rc;
+
+ if (num_online_cpus() == 1)
+ alternatives_smp_switch(1);
+
+ rc = xen_smp_intr_init(cpu);
+ if (rc)
+ return rc;
+
+ smp_store_cpu_info(cpu);
+ set_cpu_sibling_map(cpu);
+ /* This must be done before setting cpu_online_map */
+ wmb();
+
+ cpu_set(cpu, cpu_online_map);
+
+ rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+void xen_smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_self(void *v)
+{
+ int cpu = smp_processor_id();
+
+ /* make sure we're not pinning something down */
+ load_cr3(swapper_pg_dir);
+ /* should set up a minimal gdt */
+
+ HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
+ BUG();
+}
+
+void xen_smp_send_stop(void)
+{
+ smp_call_function(stop_self, NULL, 0, 0);
+}
+
+void xen_smp_send_reschedule(int cpu)
+{
+ xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
+}
+
+
+static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
+{
+ unsigned cpu;
+
+ cpus_and(mask, mask, cpu_online_map);
+
+ for_each_cpu_mask(cpu, mask)
+ xen_send_IPI_one(cpu, vector);
+}
+
+static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
+{
+ void (*func) (void *info) = call_data->func;
+ void *info = call_data->info;
+ int wait = call_data->wait;
+
+ /*
+ * Notify initiating CPU that I've grabbed the data and am
+ * about to execute the function
+ */
+ mb();
+ atomic_inc(&call_data->started);
+ /*
+ * At this point the info structure may be out of scope unless wait==1
+ */
+ irq_enter();
+ (*func)(info);
+ irq_exit();
+
+ if (wait) {
+ mb(); /* commit everything before setting finished */
+ atomic_inc(&call_data->finished);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait)
+{
+ struct call_data_struct data;
+ int cpus;
+
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+
+ cpu_clear(smp_processor_id(), mask);
+
+ cpus = cpus_weight(mask);
+ if (!cpus) {
+ spin_unlock(&call_lock);
+ return 0;
+ }
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
+ mb(); /* write everything before IPI */
+
+ /* Send a message to other CPUs and wait for them to respond */
+ xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+ /* Make sure other vcpus get a chance to run.
+ XXX too severe? Maybe we should check the other CPU's states? */
+ HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus ||
+ (wait && atomic_read(&data.finished) != cpus))
+ cpu_relax();
+
+ spin_unlock(&call_lock);
+
+ return 0;
+}
diff --git a/arch/i386/xen/time.c b/arch/i386/xen/time.c
new file mode 100644
index 00000000000..51fdabf1fd4
--- /dev/null
+++ b/arch/i386/xen/time.c
@@ -0,0 +1,590 @@
+/*
+ * Xen time implementation.
+ *
+ * This is implemented in terms of a clocksource driver which uses
+ * the hypervisor clock as a nanosecond timebase, and a clockevent
+ * driver which uses the hypervisor's timer mechanism.
+ *
+ * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/events.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/vcpu.h>
+
+#include "xen-ops.h"
+
+#define XEN_SHIFT 22
+
+/* Xen may fire a timer up to this many ns early */
+#define TIMER_SLOP 100000
+#define NS_PER_TICK (1000000000LL / HZ)
+
+static cycle_t xen_clocksource_read(void);
+
+/* These are periodically updated in shared_info, and then copied here. */
+struct shadow_time_info {
+ u64 tsc_timestamp; /* TSC at last update of time vals. */
+ u64 system_timestamp; /* Time, in nanosecs, since boot. */
+ u32 tsc_to_nsec_mul;
+ int tsc_shift;
+ u32 version;
+};
+
+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
+
+/* runstate info updated by Xen */
+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+
+/* snapshots of runstate info */
+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
+
+/* unused ns of stolen and blocked time */
+static DEFINE_PER_CPU(u64, residual_stolen);
+static DEFINE_PER_CPU(u64, residual_blocked);
+
+/* return a consistent snapshot of a 64-bit time/counter value */
+static u64 get64(const u64 *p)
+{
+ u64 ret;
+
+ if (BITS_PER_LONG < 64) {
+ u32 *p32 = (u32 *)p;
+ u32 h, l;
+
+ /*
+ * Read high then low, and then make sure high is
+ * still the same; this will only loop if low wraps
+ * and carries into high.
+ * XXX some clean way to make this endian-proof?
+ */
+ do {
+ h = p32[1];
+ barrier();
+ l = p32[0];
+ barrier();
+ } while (p32[1] != h);
+
+ ret = (((u64)h) << 32) | l;
+ } else
+ ret = *p;
+
+ return ret;
+}
+
+/*
+ * Runstate accounting
+ */
+static void get_runstate_snapshot(struct vcpu_runstate_info *res)
+{
+ u64 state_time;
+ struct vcpu_runstate_info *state;
+
+ BUG_ON(preemptible());
+
+ state = &__get_cpu_var(runstate);
+
+ /*
+ * The runstate info is always updated by the hypervisor on
+ * the current CPU, so there's no need to use anything
+ * stronger than a compiler barrier when fetching it.
+ */
+ do {
+ state_time = get64(&state->state_entry_time);
+ barrier();
+ *res = *state;
+ barrier();
+ } while (get64(&state->state_entry_time) != state_time);
+}
+
+static void setup_runstate_info(int cpu)
+{
+ struct vcpu_register_runstate_memory_area area;
+
+ area.addr.v = &per_cpu(runstate, cpu);
+
+ if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
+ cpu, &area))
+ BUG();
+}
+
+static void do_stolen_accounting(void)
+{
+ struct vcpu_runstate_info state;
+ struct vcpu_runstate_info *snap;
+ s64 blocked, runnable, offline, stolen;
+ cputime_t ticks;
+
+ get_runstate_snapshot(&state);
+
+ WARN_ON(state.state != RUNSTATE_running);
+
+ snap = &__get_cpu_var(runstate_snapshot);
+
+ /* work out how much time the VCPU has not been runn*ing* */
+ blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
+ runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
+ offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
+
+ *snap = state;
+
+ /* Add the appropriate number of ticks of stolen time,
+ including any left-overs from last time. Passing NULL to
+ account_steal_time accounts the time as stolen. */
+ stolen = runnable + offline + __get_cpu_var(residual_stolen);
+
+ if (stolen < 0)
+ stolen = 0;
+
+ ticks = 0;
+ while (stolen >= NS_PER_TICK) {
+ ticks++;
+ stolen -= NS_PER_TICK;
+ }
+ __get_cpu_var(residual_stolen) = stolen;
+ account_steal_time(NULL, ticks);
+
+ /* Add the appropriate number of ticks of blocked time,
+ including any left-overs from last time. Passing idle to
+ account_steal_time accounts the time as idle/wait. */
+ blocked += __get_cpu_var(residual_blocked);
+
+ if (blocked < 0)
+ blocked = 0;
+
+ ticks = 0;
+ while (blocked >= NS_PER_TICK) {
+ ticks++;
+ blocked -= NS_PER_TICK;
+ }
+ __get_cpu_var(residual_blocked) = blocked;
+ account_steal_time(idle_task(smp_processor_id()), ticks);
+}
+
+/*
+ * Xen sched_clock implementation. Returns the number of unstolen
+ * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
+ * states.
+ */
+unsigned long long xen_sched_clock(void)
+{
+ struct vcpu_runstate_info state;
+ cycle_t now;
+ u64 ret;
+ s64 offset;
+
+ /*
+ * Ideally sched_clock should be called on a per-cpu basis
+ * anyway, so preempt should already be disabled, but that's
+ * not the current practice.
+ */
+ preempt_disable();
+
+ now = xen_clocksource_read();
+
+ get_runstate_snapshot(&state);
+
+ WARN_ON(state.state != RUNSTATE_running);
+
+ offset = now - state.state_entry_time;
+ if (offset < 0)
+ offset = 0;
+
+ ret = state.time[RUNSTATE_blocked] +
+ state.time[RUNSTATE_running] +
+ offset;
+
+ preempt_enable();
+
+ return ret;
+}
+
+
+/* Get the CPU speed from Xen */
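+/*
+ * (Xen's vcpu_time_info gives us tsc_to_system_mul and tsc_shift such
+ * that elapsed ns = ((tsc delta << tsc_shift) * tsc_to_system_mul) >> 32,
+ * so the TSC frequency in kHz works out to roughly
+ * ((10^6 << 32) / tsc_to_system_mul) >> tsc_shift; the code below also
+ * copes with a negative tsc_shift.)
+ */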
+unsigned long xen_cpu_khz(void)
+{
+ u64 cpu_khz = 1000000ULL << 32;
+ const struct vcpu_time_info *info =
+ &HYPERVISOR_shared_info->vcpu_info[0].time;
+
+ do_div(cpu_khz, info->tsc_to_system_mul);
+ if (info->tsc_shift < 0)
+ cpu_khz <<= -info->tsc_shift;
+ else
+ cpu_khz >>= info->tsc_shift;
+
+ return cpu_khz;
+}
+
+/*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area.
+ */
+static unsigned get_time_values_from_xen(void)
+{
+ struct vcpu_time_info *src;
+ struct shadow_time_info *dst;
+
+ /* src is shared memory with the hypervisor, so we need to
+ make sure we get a consistent snapshot, even in the face of
+ being preempted. */
+ src = &__get_cpu_var(xen_vcpu)->time;
+ dst = &__get_cpu_var(shadow_time);
+
+ do {
+ dst->version = src->version;
+ rmb(); /* fetch version before data */
+ dst->tsc_timestamp = src->tsc_timestamp;
+ dst->system_timestamp = src->system_time;
+ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
+ dst->tsc_shift = src->tsc_shift;
+ rmb(); /* test version after fetching data */
+ } while ((src->version & 1) | (dst->version ^ src->version));
+
+ return dst->version;
+}
+
+/*
+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
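+ *
+ * In other words, with frac = mul_frac / 2^32, this computes
+ * (delta << shift) * frac (a negative shift meaning a right shift),
+ * keeping the high bits of the intermediate 64-bit products.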
+ */
+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+ u64 product;
+#ifdef __i386__
+ u32 tmp1, tmp2;
+#endif
+
+ if (shift < 0)
+ delta >>= -shift;
+ else
+ delta <<= shift;
+
+#ifdef __i386__
+ __asm__ (
+ "mul %5 ; "
+ "mov %4,%%eax ; "
+ "mov %%edx,%4 ; "
+ "mul %5 ; "
+ "xor %5,%5 ; "
+ "add %4,%%eax ; "
+ "adc %5,%%edx ; "
+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif __x86_64__
+ __asm__ (
+ "mul %%rdx ; shrd $32,%%rdx,%%rax"
+ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
+#else
+#error implement me!
+#endif
+
+ return product;
+}
+
+static u64 get_nsec_offset(struct shadow_time_info *shadow)
+{
+ u64 now, delta;
+ now = native_read_tsc();
+ delta = now - shadow->tsc_timestamp;
+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
+}
+
+static cycle_t xen_clocksource_read(void)
+{
+ struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
+ cycle_t ret;
+ unsigned version;
+
+ do {
+ version = get_time_values_from_xen();
+ barrier();
+ ret = shadow->system_timestamp + get_nsec_offset(shadow);
+ barrier();
+ } while (version != __get_cpu_var(xen_vcpu)->time.version);
+
+ put_cpu_var(shadow_time);
+
+ return ret;
+}
+
+static void xen_read_wallclock(struct timespec *ts)
+{
+ const struct shared_info *s = HYPERVISOR_shared_info;
+ u32 version;
+ u64 delta;
+ struct timespec now;
+
+ /* get wallclock at system boot */
+ do {
+ version = s->wc_version;
+ rmb(); /* fetch version before time */
+ now.tv_sec = s->wc_sec;
+ now.tv_nsec = s->wc_nsec;
+ rmb(); /* fetch time before checking version */
+ } while ((s->wc_version & 1) | (version ^ s->wc_version));
+
+ delta = xen_clocksource_read(); /* time since system boot */
+ delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
+
+ now.tv_nsec = do_div(delta, NSEC_PER_SEC);
+ now.tv_sec = delta;
+
+ set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
+}
+
+unsigned long xen_get_wallclock(void)
+{
+ struct timespec ts;
+
+ xen_read_wallclock(&ts);
+
+ return ts.tv_sec;
+}
+
+int xen_set_wallclock(unsigned long now)
+{
+ /* do nothing for domU */
+ return -1;
+}
+
+static struct clocksource xen_clocksource __read_mostly = {
+ .name = "xen",
+ .rating = 400,
+ .read = xen_clocksource_read,
+ .mask = ~0,
+ .mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
+ .shift = XEN_SHIFT,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
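
For reference, the generic clocksource code converts cycles to nanoseconds
as approximately ns = (cycles * mult) >> shift. With the values above that
reduces to the identity:

        ns = (cycles << XEN_SHIFT) >> XEN_SHIFT = cycles

so the value returned by xen_clocksource_read() is already a nanosecond
count, as the comment on .mult notes.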
+
+/*
+ Xen clockevent implementation
+
+ Xen has two clockevent implementations:
+
+ The old timer_op one works with all released versions of Xen prior
+ to version 3.0.4. This version of the hypervisor provides a
+ single-shot timer with nanosecond resolution. However, a 100Hz tick,
+ delivered while the vcpu is running, shares the same event channel.
+ We don't care about or use this tick, but it will cause the core
+ time code to think the timer fired too soon, and the core code will
+ end up resetting it each time. It could be filtered, but
+ doing so has complications when the ktime clocksource is not yet
+ the xen clocksource (ie, at boot time).
+
+ The new vcpu_op-based timer interface allows the tick timer period
+ to be changed or turned off. The tick timer is not useful as a
+ periodic timer because events are only delivered to running vcpus.
+ The one-shot timer can report when a timeout is in the past, so
+ set_next_event is capable of returning -ETIME when appropriate.
+ This interface is used when available.
+*/
+
+
+/*
+ Get an absolute timeout in the hypervisor's time base. In theory we
+ could maintain an offset between the kernel's time and the
+ hypervisor's time, and apply that to the kernel's absolute timeout.
+ Unfortunately the hypervisor and kernel times can drift even if the
+ kernel is using the Xen clocksource, because ntp can warp the
+ kernel's clocksource.
+*/
+static s64 get_abs_timeout(unsigned long delta)
+{
+ return xen_clocksource_read() + delta;
+}
+
+static void xen_timerop_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* unsupported */
+ WARN_ON(1);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ HYPERVISOR_set_timer_op(0); /* cancel timeout */
+ break;
+ }
+}
+
+static int xen_timerop_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
+ BUG();
+
+ /* We may have missed the deadline, but there's no real way of
+ knowing for sure. If the event was in the past, then we'll
+ get an immediate interrupt. */
+
+ return 0;
+}
+
+static const struct clock_event_device xen_timerop_clockevent = {
+ .name = "xen",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+
+ .max_delta_ns = 0xffffffff,
+ .min_delta_ns = TIMER_SLOP,
+
+ .mult = 1,
+ .shift = 0,
+ .rating = 500,
+
+ .set_mode = xen_timerop_set_mode,
+ .set_next_event = xen_timerop_set_next_event,
+};
+
+
+
+static void xen_vcpuop_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ int cpu = smp_processor_id();
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ WARN_ON(1); /* unsupported */
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ BUG();
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
+ HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+ BUG();
+ break;
+ }
+}
+
+static int xen_vcpuop_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ int cpu = smp_processor_id();
+ struct vcpu_set_singleshot_timer single;
+ int ret;
+
+ WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+
+ single.timeout_abs_ns = get_abs_timeout(delta);
+ single.flags = VCPU_SSHOTTMR_future;
+
+ ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
+
+ BUG_ON(ret != 0 && ret != -ETIME);
+
+ return ret;
+}
+
+static const struct clock_event_device xen_vcpuop_clockevent = {
+ .name = "xen",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+
+ .max_delta_ns = 0xffffffff,
+ .min_delta_ns = TIMER_SLOP,
+
+ .mult = 1,
+ .shift = 0,
+ .rating = 500,
+
+ .set_mode = xen_vcpuop_set_mode,
+ .set_next_event = xen_vcpuop_set_next_event,
+};
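
Both clock_event_device definitions above use mult = 1 and shift = 0.
Assuming the usual clockevents programming formula, the value handed to
the device is (delta_ns * mult) >> shift, so the delta passed to
set_next_event() is already a nanosecond count, and max_delta_ns =
0xffffffff caps a single one-shot at roughly 4.29 seconds:

        cycles = (delta_ns * 1) >> 0 = delta_ns
        0xffffffff ns ~= 4.29 s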
+
+static const struct clock_event_device *xen_clockevent =
+ &xen_timerop_clockevent;
+static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
+
+static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
+ irqreturn_t ret;
+
+ ret = IRQ_NONE;
+ if (evt->event_handler) {
+ evt->event_handler(evt);
+ ret = IRQ_HANDLED;
+ }
+
+ do_stolen_accounting();
+
+ return ret;
+}
+
+void xen_setup_timer(int cpu)
+{
+ const char *name;
+ struct clock_event_device *evt;
+ int irq;
+
+ printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
+
+ name = kasprintf(GFP_KERNEL, "timer%d", cpu);
+ if (!name)
+ name = "<timer kasprintf failed>";
+
+ irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ name, NULL);
+
+ evt = &per_cpu(xen_clock_events, cpu);
+ memcpy(evt, xen_clockevent, sizeof(*evt));
+
+ evt->cpumask = cpumask_of_cpu(cpu);
+ evt->irq = irq;
+
+ setup_runstate_info(cpu);
+}
+
+void xen_setup_cpu_clockevents(void)
+{
+ BUG_ON(preemptible());
+
+ clockevents_register_device(&__get_cpu_var(xen_clock_events));
+}
+
+__init void xen_time_init(void)
+{
+ int cpu = smp_processor_id();
+
+ get_time_values_from_xen();
+
+ clocksource_register(&xen_clocksource);
+
+ if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
+ /* Successfully turned off 100Hz tick, so we have the
+ vcpuop-based timer interface */
+ printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
+ xen_clockevent = &xen_vcpuop_clockevent;
+ }
+
+ /* Set initial system time with full resolution */
+ xen_read_wallclock(&xtime);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ tsc_disable = 0;
+
+ xen_setup_timer(cpu);
+ xen_setup_cpu_clockevents();
+}
diff --git a/arch/i386/xen/xen-asm.S b/arch/i386/xen/xen-asm.S
new file mode 100644
index 00000000000..1a43b60c0c6
--- /dev/null
+++ b/arch/i386/xen/xen-asm.S
@@ -0,0 +1,291 @@
+/*
+ Asm versions of Xen pv-ops, suitable for either direct use or inlining.
+ The inline versions are the same as the direct-use versions, with the
+ pre- and post-amble chopped off.
+
+ This code is encoded for size rather than absolute efficiency,
+ with a view to being able to inline as much as possible.
+
+ We only bother with direct forms (ie, vcpu in pda) of the operations
+ here; the indirect forms are better handled in C, since they're
+ generally too large to inline anyway.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+#include <xen/interface/xen.h>
+
+#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
+#define ENDPATCH(x) .globl x##_end; x##_end=.
+
+/* Pseudo-flag used for virtual NMI, which we don't implement yet */
+#define XEN_EFLAGS_NMI 0x80000000
+
+/*
+ Enable events. This clears the event mask and tests the pending
+ event status with a single 'and' operation. If there are pending
+ events, then enter the hypervisor to get them handled.
+ */
+ENTRY(xen_irq_enable_direct)
+ /* Clear mask and test pending */
+ andw $0x00ff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+ /* Preemption here doesn't matter, because preemption itself will
+ deal with any pending interrupts. The pending check may end up
+ being run on the wrong CPU, but that doesn't hurt. */
+ jz 1f
+2: call check_events
+1:
+ENDPATCH(xen_irq_enable_direct)
+ ret
+ ENDPROC(xen_irq_enable_direct)
+ RELOC(xen_irq_enable_direct, 2b+1)
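
As a rough guide to what the assembly above does, a C-level sketch
(illustrative only, not part of the patch; it relies on the xen_vcpu
per-cpu pointer and the force_evtchn_callback() helper used elsewhere in
this series) might read:

        static void xen_irq_enable_sketch(void)
        {
                struct vcpu_info *vcpu = x86_read_percpu(xen_vcpu);

                vcpu->evtchn_upcall_mask = 0;           /* clear the event mask */
                barrier();                              /* unmask before checking */
                if (vcpu->evtchn_upcall_pending)
                        force_evtchn_callback();        /* have Xen deliver them */
        }

The andw above folds the mask clear and the pending test into a single
16-bit operation covering the adjacent pending and mask bytes.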
+
+
+/*
+ Disabling events is simply a matter of making the event mask
+ non-zero.
+ */
+ENTRY(xen_irq_disable_direct)
+ movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+ENDPATCH(xen_irq_disable_direct)
+ ret
+ ENDPROC(xen_irq_disable_direct)
+ RELOC(xen_irq_disable_direct, 0)
+
+/*
+ (xen_)save_fl is used to get the current interrupt enable status.
+ Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ may be set in the return value. We take advantage of this by
+ making sure that X86_EFLAGS_IF has the right value (and other bits
+ in that byte are 0), but other bits in the return value are
+ undefined. We need to toggle the state of the bit, because
+ Xen and x86 use opposite senses (mask vs enable).
+ */
+ENTRY(xen_save_fl_direct)
+ testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+ setz %ah
+ addb %ah,%ah
+ENDPATCH(xen_save_fl_direct)
+ ret
+ ENDPROC(xen_save_fl_direct)
+ RELOC(xen_save_fl_direct, 0)
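
The corresponding C-level sketch for the flag save (again illustrative
only, not part of the patch):

        static unsigned long xen_save_fl_sketch(void)
        {
                struct vcpu_info *vcpu = x86_read_percpu(xen_vcpu);

                /* Xen's mask and the x86 IF flag have opposite senses. */
                return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
        }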
+
+
+/*
+ In principle the caller should be passing us a value returned
+ from xen_save_fl_direct, but for robustness' sake we test only
+ the X86_EFLAGS_IF flag rather than the whole byte. After
+ setting the interrupt mask state, it checks for unmasked
+ pending events and enters the hypervisor to get them delivered
+ if so.
+ */
+ENTRY(xen_restore_fl_direct)
+ testb $X86_EFLAGS_IF>>8, %ah
+ setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
+ /* Preemption here doesn't matter, because preemption itself will
+ deal with any pending interrupts. The pending check may end up
+ being run on the wrong CPU, but that doesn't hurt. */
+
+ /* check for unmasked and pending */
+ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
+ jz 1f
+2: call check_events
+1:
+ENDPATCH(xen_restore_fl_direct)
+ ret
+ ENDPROC(xen_restore_fl_direct)
+ RELOC(xen_restore_fl_direct, 2b+1)
+
+/*
+ This is run where a normal iret would be run, with the same stack setup:
+ 8: eflags
+ 4: cs
+ esp-> 0: eip
+
+ This attempts to make sure that any pending events are dealt
+ with on return to usermode, but there is a small window in
+ which an event can happen just before entering usermode. If
+ the nested interrupt ends up setting one of the TIF_WORK_MASK
+ pending work flags, they will not be tested again before
+ returning to usermode. This means that a process can end up
+ with pending work, which will be unprocessed until the process
+ enters and leaves the kernel again, which could be an
+ unbounded amount of time. This means that a pending signal or
+ reschedule event could be indefinitely delayed.
+
+ The fix is to notice a nested interrupt in the critical
+ window, and if one occurs, then fold the nested interrupt into
+ the current interrupt stack frame, and re-process it
+ iteratively rather than recursively. This means that it will
+ exit via the normal path, and all pending work will be dealt
+ with appropriately.
+
+ Because the nested interrupt handler needs to deal with the
+ current stack state in whatever form it's in, we keep things
+ simple by only using a single register which is pushed/popped
+ on the stack.
+
+ Non-direct iret could be done in the same way, but it would
+ require an annoying amount of code duplication. We'll assume
+ that direct mode will be the common case once the hypervisor
+ support becomes commonplace.
+ */
+ENTRY(xen_iret_direct)
+ /* test eflags for special cases */
+ testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
+ jnz hyper_iret
+
+ push %eax
+ ESP_OFFSET=4 # bytes pushed onto stack
+
+ /* Store vcpu_info pointer for easy access. Do it this
+ way to avoid having to reload %fs */
+#ifdef CONFIG_SMP
+ GET_THREAD_INFO(%eax)
+ movl TI_cpu(%eax),%eax
+ movl __per_cpu_offset(,%eax,4),%eax
+ lea per_cpu__xen_vcpu_info(%eax),%eax
+#else
+ movl $per_cpu__xen_vcpu_info, %eax
+#endif
+
+ /* check IF state we're restoring */
+ testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
+
+ /* Maybe enable events. Once this happens we could get a
+ recursive event, so the critical region starts immediately
+ afterwards. However, if that happens we don't end up
+ resuming the code, so we don't have to be worried about
+ being preempted to another CPU. */
+ setz XEN_vcpu_info_mask(%eax)
+xen_iret_start_crit:
+
+ /* check for unmasked and pending */
+ cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+
+ /* If there's something pending, mask events again so we
+ can jump back into xen_hypervisor_callback */
+ sete XEN_vcpu_info_mask(%eax)
+
+ popl %eax
+
+ /* From this point on the registers are restored and the stack
+ updated, so we don't need to worry about it if we're preempted */
+iret_restore_end:
+
+ /* Jump to hypervisor_callback after fixing up the stack.
+ Events are masked, so jumping out of the critical
+ region is OK. */
+ je xen_hypervisor_callback
+
+ iret
+xen_iret_end_crit:
+
+hyper_iret:
+ /* put this out of line since it's very rarely used */
+ jmp hypercall_page + __HYPERVISOR_iret * 32
+
+ .globl xen_iret_start_crit, xen_iret_end_crit
+
+/*
+ This is called by xen_hypervisor_callback in entry.S when it sees
+ that the EIP at the time of interrupt was between xen_iret_start_crit
+ and xen_iret_end_crit. We're passed the EIP in %eax so we can do
+ a more refined determination of what to do.
+
+ The stack format at this point is:
+ ----------------
+ ss : (ss/esp may be present if we came from usermode)
+ esp :
+ eflags } outer exception info
+ cs }
+ eip }
+ ---------------- <- edi (copy dest)
+ eax : outer eax if it hasn't been restored
+ ----------------
+ eflags } nested exception info
+ cs } (no ss/esp because we're nested
+ eip } from the same ring)
+ orig_eax }<- esi (copy src)
+ - - - - - - - -
+ fs }
+ es }
+ ds } SAVE_ALL state
+ eax }
+ : :
+ ebx }
+ ----------------
+ return addr <- esp
+ ----------------
+
+ In order to deliver the nested exception properly, we need to shift
+ everything from the return addr up to the error code so it
+ sits just under the outer exception info. This means that when we
+ handle the exception, we do it in the context of the outer exception
+ rather than starting a new one.
+
+ The only caveat is that if the outer eax hasn't been
+ restored yet (ie, it's still on stack), we need to insert
+ its value into the SAVE_ALL state before going on, since
+ it's usermode state which we eventually need to restore.
+ */
+ENTRY(xen_iret_crit_fixup)
+ /* offsets +4 for return address */
+
+ /*
+ Paranoia: Make sure we're really coming from kernel space.
+ One could imagine a case where userspace jumps into the
+ critical range address, but just before the CPU delivers a GP,
+ it decides to deliver an interrupt instead. Unlikely?
+ Definitely. Easy to avoid? Yes. The Intel documents
+ explicitly say that the reported EIP for a bad jump is the
+ jump instruction itself, not the destination, but some virtual
+ environments get this wrong.
+ */
+ movl PT_CS+4(%esp), %ecx
+ andl $SEGMENT_RPL_MASK, %ecx
+ cmpl $USER_RPL, %ecx
+ je 2f
+
+ lea PT_ORIG_EAX+4(%esp), %esi
+ lea PT_EFLAGS+4(%esp), %edi
+
+ /* If eip is before iret_restore_end then stack
+ hasn't been restored yet. */
+ cmp $iret_restore_end, %eax
+ jae 1f
+
+ movl 0+4(%edi),%eax /* copy EAX */
+ movl %eax, PT_EAX+4(%esp)
+
+ lea ESP_OFFSET(%edi),%edi /* move dest up over saved regs */
+
+ /* set up the copy */
+1: std
+ mov $(PT_EIP+4) / 4, %ecx /* copy ret+saved regs up to orig_eax */
+ rep movsl
+ cld
+
+ lea 4(%edi),%esp /* point esp to new frame */
+2: ret
+
+
+/*
+ Force an event check by making a hypercall,
+ but preserve regs before making the call.
+ */
+check_events:
+ push %eax
+ push %ecx
+ push %edx
+ call force_evtchn_callback
+ pop %edx
+ pop %ecx
+ pop %eax
+ ret
diff --git a/arch/i386/xen/xen-head.S b/arch/i386/xen/xen-head.S
new file mode 100644
index 00000000000..2998d55a001
--- /dev/null
+++ b/arch/i386/xen/xen-head.S
@@ -0,0 +1,36 @@
+/* Xen-specific pieces of head.S, intended to be included in the right
+ place in head.S */
+
+#ifdef CONFIG_XEN
+
+#include <linux/elfnote.h>
+#include <asm/boot.h>
+#include <xen/interface/elfnote.h>
+
+ENTRY(startup_xen)
+ movl %esi,xen_start_info
+ cld
+ movl $(init_thread_union+THREAD_SIZE),%esp
+ jmp xen_start_kernel
+
+.pushsection ".bss.page_aligned"
+ .align PAGE_SIZE_asm
+ENTRY(hypercall_page)
+ .skip 0x1000
+.popsection
+
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long __PAGE_OFFSET)
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long startup_xen)
+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long hypercall_page)
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb")
+#ifdef CONFIG_X86_PAE
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
+#else
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "no")
+#endif
+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
+
+#endif /*CONFIG_XEN */
diff --git a/arch/i386/xen/xen-ops.h b/arch/i386/xen/xen-ops.h
new file mode 100644
index 00000000000..b9aaea45f07
--- /dev/null
+++ b/arch/i386/xen/xen-ops.h
@@ -0,0 +1,71 @@
+#ifndef XEN_OPS_H
+#define XEN_OPS_H
+
+#include <linux/init.h>
+
+/* These are code, but not functions. Defined in entry.S */
+extern const char xen_hypervisor_callback[];
+extern const char xen_failsafe_callback[];
+
+void xen_copy_trap_info(struct trap_info *traps);
+
+DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
+DECLARE_PER_CPU(unsigned long, xen_cr3);
+
+extern struct start_info *xen_start_info;
+extern struct shared_info *HYPERVISOR_shared_info;
+
+char * __init xen_memory_setup(void);
+void __init xen_arch_setup(void);
+void __init xen_init_IRQ(void);
+
+void xen_setup_timer(int cpu);
+void xen_setup_cpu_clockevents(void);
+unsigned long xen_cpu_khz(void);
+void __init xen_time_init(void);
+unsigned long xen_get_wallclock(void);
+int xen_set_wallclock(unsigned long time);
+unsigned long long xen_sched_clock(void);
+
+void xen_mark_init_mm_pinned(void);
+
+DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
+
+static inline unsigned xen_get_lazy_mode(void)
+{
+ return x86_read_percpu(xen_lazy_mode);
+}
+
+void __init xen_fill_possible_map(void);
+
+void __init xen_setup_vcpu_info_placement(void);
+void xen_smp_prepare_boot_cpu(void);
+void xen_smp_prepare_cpus(unsigned int max_cpus);
+int xen_cpu_up(unsigned int cpu);
+void xen_smp_cpus_done(unsigned int max_cpus);
+
+void xen_smp_send_stop(void);
+void xen_smp_send_reschedule(int cpu);
+int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
+ int wait);
+int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait);
+
+int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait);
+
+
+/* Declare an asm function, along with symbols needed to make it
+ inlineable */
+#define DECL_ASM(ret, name, ...) \
+ ret name(__VA_ARGS__); \
+ extern char name##_end[]; \
+ extern char name##_reloc[] \
+
+DECL_ASM(void, xen_irq_enable_direct, void);
+DECL_ASM(void, xen_irq_disable_direct, void);
+DECL_ASM(unsigned long, xen_save_fl_direct, void);
+DECL_ASM(void, xen_restore_fl_direct, unsigned long);
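
For example, DECL_ASM(void, xen_restore_fl_direct, unsigned long) expands
to roughly the following declarations, matching the _end and _reloc
symbols emitted by the ENDPATCH() and RELOC() macros in xen-asm.S:

        void xen_restore_fl_direct(unsigned long);
        extern char xen_restore_fl_direct_end[];
        extern char xen_restore_fl_direct_reloc[];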
+
+void xen_iret_direct(void);
+#endif /* XEN_OPS_H */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index faf5ef3e90d..94b4a028232 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -156,11 +156,14 @@ static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_OF */
/* Add sysfs properties */
-void pcibios_add_platform_entries(struct pci_dev *pdev)
+int pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_OF
- device_create_file(&pdev->dev, &dev_attr_devspec);
+ return device_create_file(&pdev->dev, &dev_attr_devspec);
+#else
+ return 0;
#endif /* CONFIG_PPC_OF */
+
}
char __init *pcibios_setup(char *str)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index bc43bba05cf..6018178708a 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -350,11 +350,13 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
- /* Apply CPUs-specific fixups to kernel text (nop out sections
- * not relevant to this CPU)
+ /* Apply the CPUs-specific and firmware specific fixups to kernel
+ * text (nop out sections not relevant to this CPU or this firmware)
*/
do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup);
+ do_feature_fixups(powerpc_firmware_features,
+ &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
/*
* Unflatten the device-tree passed by prom_init or kexec
@@ -392,12 +394,6 @@ void __init setup_system(void)
if (ppc_md.init_early)
ppc_md.init_early();
- /* Apply firmware specific fixups to kernel text (nop out
- * sections not relevant to this firmware)
- */
- do_feature_fixups(powerpc_firmware_features,
- &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
-
/*
* We can discover serial ports now since the above did setup the
* hash table management for us, thus ioremap works. We do that early
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index b42cbf1e2d7..bd85b5fd08c 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -773,6 +773,13 @@ asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
return sys_truncate(path, (high << 32) | low);
}
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ u32 lenhi, u32 lenlo)
+{
+ return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
+ ((loff_t)lenhi << 32) | lenlo);
+}
+
asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
unsigned long low)
{
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 73df7115325..603d83ad65c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -21,6 +21,9 @@ config GENERIC_ISA_DMA
bool
default y
+config ARCH_NO_VIRT_TO_BUS
+ def_bool y
+
source "init/Kconfig"
menu "General machine setup"
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index b84b6af1241..df6ee71894d 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -62,6 +62,9 @@ config AUDIT_ARCH
bool
default y
+config ARCH_NO_VIRT_TO_BUS
+ def_bool y
+
choice
prompt "Kernel page size"
default SPARC64_PAGE_SIZE_8KB
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
index ba01533f4e0..fa1f04d756a 100644
--- a/arch/sparc64/kernel/ds.c
+++ b/arch/sparc64/kernel/ds.c
@@ -1013,6 +1013,19 @@ static void ds_up(struct ds_info *dp)
dp->hs_state = DS_HS_START;
}
+static void ds_reset(struct ds_info *dp)
+{
+ int i;
+
+ dp->hs_state = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ds_states); i++) {
+ struct ds_cap_state *cp = &ds_states[i];
+
+ cp->state = CAP_STATE_UNKNOWN;
+ }
+}
+
static void ds_event(void *arg, int event)
{
struct ds_info *dp = arg;
@@ -1028,6 +1041,12 @@ static void ds_event(void *arg, int event)
return;
}
+ if (event == LDC_EVENT_RESET) {
+ ds_reset(dp);
+ spin_unlock_irqrestore(&ds_lock, flags);
+ return;
+ }
+
if (event != LDC_EVENT_DATA_READY) {
printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
spin_unlock_irqrestore(&ds_lock, flags);
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index de5310ffdb4..302ba5e5a0b 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -137,7 +137,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
sizeof(struct mdesc_hdr) +
mdesc_size);
- base = kmalloc(handle_size + 15, GFP_KERNEL);
+ base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
if (base) {
struct mdesc_handle *hp;
unsigned long addr;
@@ -214,18 +214,83 @@ void mdesc_release(struct mdesc_handle *hp)
}
EXPORT_SYMBOL(mdesc_release);
+static DEFINE_MUTEX(mdesc_mutex);
+static struct mdesc_notifier_client *client_list;
+
+void mdesc_register_notifier(struct mdesc_notifier_client *client)
+{
+ u64 node;
+
+ mutex_lock(&mdesc_mutex);
+ client->next = client_list;
+ client_list = client;
+
+ mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
+ client->add(cur_mdesc, node);
+
+ mutex_unlock(&mdesc_mutex);
+}
+
+/* Run 'func' on nodes which are in A but not in B. */
+static void invoke_on_missing(const char *name,
+ struct mdesc_handle *a,
+ struct mdesc_handle *b,
+ void (*func)(struct mdesc_handle *, u64))
+{
+ u64 node;
+
+ mdesc_for_each_node_by_name(a, node, name) {
+ const u64 *id = mdesc_get_property(a, node, "id", NULL);
+ int found = 0;
+ u64 fnode;
+
+ mdesc_for_each_node_by_name(b, fnode, name) {
+ const u64 *fid = mdesc_get_property(b, fnode,
+ "id", NULL);
+
+ if (*id == *fid) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ func(a, node);
+ }
+}
+
+static void notify_one(struct mdesc_notifier_client *p,
+ struct mdesc_handle *old_hp,
+ struct mdesc_handle *new_hp)
+{
+ invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
+ invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
+}
+
+static void mdesc_notify_clients(struct mdesc_handle *old_hp,
+ struct mdesc_handle *new_hp)
+{
+ struct mdesc_notifier_client *p = client_list;
+
+ while (p) {
+ notify_one(p, old_hp, new_hp);
+ p = p->next;
+ }
+}
+
void mdesc_update(void)
{
unsigned long len, real_len, status;
struct mdesc_handle *hp, *orig_hp;
unsigned long flags;
+ mutex_lock(&mdesc_mutex);
+
(void) sun4v_mach_desc(0UL, 0UL, &len);
hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
if (!hp) {
printk(KERN_ERR "MD: mdesc alloc fails\n");
- return;
+ goto out;
}
status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
@@ -234,18 +299,25 @@ void mdesc_update(void)
status);
atomic_dec(&hp->refcnt);
mdesc_free(hp);
- return;
+ goto out;
}
spin_lock_irqsave(&mdesc_lock, flags);
orig_hp = cur_mdesc;
cur_mdesc = hp;
+ spin_unlock_irqrestore(&mdesc_lock, flags);
+ mdesc_notify_clients(orig_hp, hp);
+
+ spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
spin_unlock_irqrestore(&mdesc_lock, flags);
+
+out:
+ mutex_unlock(&mdesc_mutex);
}
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
diff --git a/arch/sparc64/kernel/vio.c b/arch/sparc64/kernel/vio.c
index 49569b44ea1..8d3cc4fdb55 100644
--- a/arch/sparc64/kernel/vio.c
+++ b/arch/sparc64/kernel/vio.c
@@ -201,10 +201,11 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
struct device *parent)
{
- const char *type, *compat;
+ const char *type, *compat, *bus_id_name;
struct device_node *dp;
struct vio_dev *vdev;
int err, tlen, clen;
+ const u64 *id;
type = mdesc_get_property(hp, mp, "device-type", &tlen);
if (!type) {
@@ -220,6 +221,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
return NULL;
}
+ bus_id_name = type;
+ if (!strcmp(type, "domain-services-port"))
+ bus_id_name = "ds";
+
+ if (strlen(bus_id_name) >= KOBJ_NAME_LEN - 4) {
+ printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
+ bus_id_name);
+ return NULL;
+ }
+
compat = mdesc_get_property(hp, mp, "device-type", &clen);
if (!compat) {
clen = 0;
@@ -249,7 +260,14 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
vio_fill_channel_info(hp, mp, vdev);
- snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%lx", mp);
+ id = mdesc_get_property(hp, mp, "id", NULL);
+ if (!id)
+ snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s",
+ bus_id_name);
+ else
+ snprintf(vdev->dev.bus_id, BUS_ID_SIZE, "%s-%lu",
+ bus_id_name, *id);
+
vdev->dev.parent = parent;
vdev->dev.bus = &vio_bus_type;
vdev->dev.release = vio_dev_release;
@@ -269,6 +287,8 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
}
vdev->dp = dp;
+ printk(KERN_ERR "VIO: Adding device %s\n", vdev->dev.bus_id);
+
err = device_register(&vdev->dev);
if (err) {
printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
@@ -283,46 +303,46 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
return vdev;
}
-static void walk_tree(struct mdesc_handle *hp, u64 n, struct vio_dev *parent)
+static void vio_add(struct mdesc_handle *hp, u64 node)
{
- u64 a;
-
- mdesc_for_each_arc(a, hp, n, MDESC_ARC_TYPE_FWD) {
- struct vio_dev *vdev;
- u64 target;
-
- target = mdesc_arc_target(hp, a);
- vdev = vio_create_one(hp, target, &parent->dev);
- if (vdev)
- walk_tree(hp, target, vdev);
- }
+ (void) vio_create_one(hp, node, &root_vdev->dev);
}
-static void create_devices(struct mdesc_handle *hp, u64 root)
+static int vio_md_node_match(struct device *dev, void *arg)
{
- u64 mp;
+ struct vio_dev *vdev = to_vio_dev(dev);
- root_vdev = vio_create_one(hp, root, NULL);
- if (!root_vdev) {
- printk(KERN_ERR "VIO: Coult not create root device.\n");
- return;
- }
+ if (vdev->mp == (u64) arg)
+ return 1;
- walk_tree(hp, root, root_vdev);
+ return 0;
+}
+
+static void vio_remove(struct mdesc_handle *hp, u64 node)
+{
+ struct device *dev;
- /* Domain services is odd as it doesn't sit underneath the
- * channel-devices node, so we plug it in manually.
- */
- mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "domain-services");
- if (mp != MDESC_NODE_NULL) {
- struct vio_dev *parent = vio_create_one(hp, mp,
- &root_vdev->dev);
+ dev = device_find_child(&root_vdev->dev, (void *) node,
+ vio_md_node_match);
+ if (dev) {
+ printk(KERN_INFO "VIO: Removing device %s\n", dev->bus_id);
- if (parent)
- walk_tree(hp, mp, parent);
+ device_unregister(dev);
}
}
+static struct mdesc_notifier_client vio_device_notifier = {
+ .add = vio_add,
+ .remove = vio_remove,
+ .node_name = "virtual-device-port",
+};
+
+static struct mdesc_notifier_client vio_ds_notifier = {
+ .add = vio_add,
+ .remove = vio_remove,
+ .node_name = "domain-services-port",
+};
+
const char *channel_devices_node = "channel-devices";
const char *channel_devices_compat = "SUNW,sun4v-channel-devices";
const char *cfg_handle_prop = "cfg-handle";
@@ -381,11 +401,19 @@ static int __init vio_init(void)
cdev_cfg_handle = *cfg_handle;
- create_devices(hp, root);
+ root_vdev = vio_create_one(hp, root, NULL);
+ err = -ENODEV;
+ if (!root_vdev) {
+ printk(KERN_ERR "VIO: Coult not create root device.\n");
+ goto out_release;
+ }
+
+ mdesc_register_notifier(&vio_device_notifier);
+ mdesc_register_notifier(&vio_ds_notifier);
mdesc_release(hp);
return 0;
out_release:
mdesc_release(hp);
diff --git a/arch/sparc64/kernel/viohs.c b/arch/sparc64/kernel/viohs.c
index 15613add45d..09126fc338b 100644
--- a/arch/sparc64/kernel/viohs.c
+++ b/arch/sparc64/kernel/viohs.c
@@ -78,6 +78,24 @@ static int start_handshake(struct vio_driver_state *vio)
return 0;
}
+static void flush_rx_dring(struct vio_driver_state *vio)
+{
+ struct vio_dring_state *dr;
+ u64 ident;
+
+ BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));
+
+ dr = &vio->drings[VIO_DRIVER_RX_RING];
+ ident = dr->ident;
+
+ BUG_ON(!vio->desc_buf);
+ kfree(vio->desc_buf);
+ vio->desc_buf = NULL;
+
+ memset(dr, 0, sizeof(*dr));
+ dr->ident = ident;
+}
+
void vio_link_state_change(struct vio_driver_state *vio, int event)
{
if (event == LDC_EVENT_UP) {
@@ -98,6 +116,16 @@ void vio_link_state_change(struct vio_driver_state *vio, int event)
break;
}
start_handshake(vio);
+ } else if (event == LDC_EVENT_RESET) {
+ vio->hs_state = VIO_HS_INVALID;
+
+ if (vio->dr_state & VIO_DR_STATE_RXREG)
+ flush_rx_dring(vio);
+
+ vio->dr_state = 0x00;
+ memset(&vio->ver, 0, sizeof(vio->ver));
+
+ ldc_disconnect(vio->lp);
}
}
EXPORT_SYMBOL(vio_link_state_change);
@@ -396,6 +424,8 @@ static int process_dreg_info(struct vio_driver_state *vio,
if (vio->dr_state & VIO_DR_STATE_RXREG)
goto send_nack;
+ BUG_ON(vio->desc_buf);
+
vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
if (!vio->desc_buf)
goto send_nack;
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 782dea81943..3f66e970d86 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -719,4 +719,5 @@ ia32_sys_call_table:
.quad compat_sys_signalfd
.quad compat_sys_timerfd
.quad sys_eventfd
+ .quad sys32_fallocate
ia32_syscall_end:
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 99a78a3cce7..bee96d61443 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -879,3 +879,11 @@ asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
len, advice);
}
+
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
+ unsigned offset_hi, unsigned len_lo,
+ unsigned len_hi)
+{
+ return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
+ ((u64)len_hi << 32) | len_lo);
+}
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 296d2b0c5d8..fd9aff3f389 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -6,6 +6,7 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
+#include <xen/hvc-console.h>
/* Simple VGA output */
@@ -242,6 +243,10 @@ static int __init setup_early_printk(char *buf)
simnow_init(buf + 6);
early_console = &simnow_console;
keep_early = 1;
+#ifdef CONFIG_HVC_XEN
+ } else if (!strncmp(buf, "xen", 3)) {
+ early_console = &xenboot_console;
+#endif
}
if (keep_early)
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index aa1d1599179..f3fb8174559 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -174,7 +174,7 @@ static void do_mce_trigger(void)
if (events != atomic_read(&mce_logged) && trigger[0]) {
/* Small race window, but should be harmless. */
atomic_set(&mce_logged, events);
- call_usermodehelper(trigger, trigger_argv, NULL, -1);
+ call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
}
}
diff --git a/drivers/Makefile b/drivers/Makefile
index 503d8256944..6d9d7fab77f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_PNP) += pnp/
obj-$(CONFIG_ARM_AMBA) += amba/
+obj-$(CONFIG_XEN) += xen/
+
# char/ comes before serial/ etc so that the VT console is the boot-time
# default.
obj-y += char/
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 88a6fc7fd27..58f1338981b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -40,6 +40,7 @@
#include <linux/jiffies.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
+#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
@@ -59,7 +60,6 @@
#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
#define ACPI_THERMAL_NOTIFY_HOT 0xF1
#define ACPI_THERMAL_MODE_ACTIVE 0x00
-#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff"
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
@@ -419,26 +419,6 @@ static int acpi_thermal_get_devices(struct acpi_thermal *tz)
return 0;
}
-static int acpi_thermal_call_usermode(char *path)
-{
- char *argv[2] = { NULL, NULL };
- char *envp[3] = { NULL, NULL, NULL };
-
-
- if (!path)
- return -EINVAL;
-
- argv[0] = path;
-
- /* minimal command environment */
- envp[0] = "HOME=/";
- envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-
- call_usermodehelper(argv[0], argv, envp, 0);
-
- return 0;
-}
-
static int acpi_thermal_critical(struct acpi_thermal *tz)
{
if (!tz || !tz->trips.critical.flags.valid)
@@ -456,7 +436,7 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
tz->trips.critical.flags.enabled);
- acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF);
+ orderly_poweroff(true);
return 0;
}
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index bb4ae628149..bed9f58c2d5 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -172,7 +172,7 @@ config ATM_ZATM_DEBUG
config ATM_NICSTAR
tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
- depends on PCI && !64BIT
+ depends on PCI && !64BIT && VIRT_TO_BUS
help
The NICStAR chipset family is used in a large number of ATM NICs for
25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 77637e780d4..41b2204ebc6 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1738,7 +1738,8 @@ static int __devinit eni_do_init(struct atm_dev *dev)
printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
"magic - expected 0x%x, got 0x%x\n",dev->number,
ENI155_MAGIC,(unsigned) readl(&eprom->magic));
- return -EINVAL;
+ error = -EINVAL;
+ goto unmap;
}
}
eni_dev->phy = base+PHY_BASE;
@@ -1765,17 +1766,27 @@ static int __devinit eni_do_init(struct atm_dev *dev)
printk(")\n");
printk(KERN_ERR DEV_LABEL "(itf %d): ERROR - wrong id 0x%x\n",
dev->number,(unsigned) eni_in(MID_RES_ID_MCON));
- return -EINVAL;
+ error = -EINVAL;
+ goto unmap;
}
error = eni_dev->asic ? get_esi_asic(dev) : get_esi_fpga(dev,base);
- if (error) return error;
+ if (error)
+ goto unmap;
for (i = 0; i < ESI_LEN; i++)
printk("%s%02X",i ? "-" : "",dev->esi[i]);
printk(")\n");
printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
- return suni_init(dev);
+
+ error = suni_init(dev);
+ if (error)
+ goto unmap;
+out:
+ return error;
+unmap:
+ iounmap(base);
+ goto out;
}
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 38b688f9f6a..737cea49f87 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1710,7 +1710,7 @@ static int __devinit fs_init (struct fs_dev *dev)
/* This bit is documented as "RESERVED" */
if (isr & ISR_INIT_ERR) {
printk (KERN_ERR "Error initializing the FS... \n");
- return 1;
+ goto unmap;
}
if (isr & ISR_INIT) {
fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
@@ -1723,7 +1723,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (!to) {
printk (KERN_ERR "timeout initializing the FS... \n");
- return 1;
+ goto unmap;
}
/* XXX fix for fs155 */
@@ -1803,7 +1803,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (!dev->atm_vccs) {
printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
/* XXX Clean up..... */
- return 1;
+ goto unmap;
}
dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
@@ -1813,7 +1813,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (!dev->tx_inuse) {
printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
/* XXX Clean up..... */
- return 1;
+ goto unmap;
}
/* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
/* -- RAS2 : FS50 only: Default is OK. */
@@ -1840,7 +1840,7 @@ static int __devinit fs_init (struct fs_dev *dev)
if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
/* XXX undo all previous stuff... */
- return 1;
+ goto unmap;
}
fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);
@@ -1890,6 +1890,9 @@ static int __devinit fs_init (struct fs_dev *dev)
func_exit ();
return 0;
+unmap:
+ iounmap(dev->base);
+ return 1;
}
static int __devinit firestream_init_one (struct pci_dev *pci_dev,
@@ -2012,6 +2015,7 @@ static void __devexit firestream_remove_one (struct pci_dev *pdev)
for (i=0;i < FS_NR_RX_QUEUES;i++)
free_queue (dev, &dev->rx_rq[i]);
+ iounmap(dev->base);
fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
nxtdev = dev->next;
kfree (dev);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 8f995ce8d73..f8b1700f4c1 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -65,7 +65,7 @@ static char const rcsid[] =
static unsigned int vpibits = 1;
-#define CONFIG_ATM_IDT77252_SEND_IDLE 1
+#define ATM_IDT77252_SEND_IDLE 1
/*
@@ -3404,7 +3404,7 @@ init_card(struct atm_dev *dev)
conf = SAR_CFG_TX_FIFO_SIZE_9 | /* Use maximum fifo size */
SAR_CFG_RXSTQ_SIZE_8k | /* Receive Status Queue is 8k */
SAR_CFG_IDLE_CLP | /* Set CLP on idle cells */
-#ifndef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifndef ATM_IDT77252_SEND_IDLE
SAR_CFG_NO_IDLE | /* Do not send idle cells */
#endif
0;
@@ -3541,7 +3541,7 @@ init_card(struct atm_dev *dev)
printk("%s: Linkrate on ATM line : %u bit/s, %u cell/s.\n",
card->name, linkrate, card->link_pcr);
-#ifdef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifdef ATM_IDT77252_SEND_IDLE
card->utopia_pcr = card->link_pcr;
#else
card->utopia_pcr = (160000000 / 8 / 54);
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 0e2c1ae650e..55fd1b4543f 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -552,8 +552,8 @@ static inline void sram_write(const struct lanai_dev *lanai,
writel(val, sram_addr(lanai, offset));
}
-static int __init sram_test_word(
- const struct lanai_dev *lanai, int offset, u32 pattern)
+static int __devinit sram_test_word(const struct lanai_dev *lanai,
+ int offset, u32 pattern)
{
u32 readback;
sram_write(lanai, pattern, offset);
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index 480947f4e01..842e26c4555 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -134,7 +134,7 @@ nicstar_read_eprom_status( virt_addr_t base )
/* Send read instruction */
val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;
- for (i=0; i<sizeof rdsrtab/sizeof rdsrtab[0]; i++)
+ for (i=0; i<ARRAY_SIZE(rdsrtab); i++)
{
NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
(val | rdsrtab[i]) );
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 8f65b88cf71..a4a31199240 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -427,4 +427,13 @@ config XILINX_SYSACE
help
Include support for the Xilinx SystemACE CompactFlash interface
+config XEN_BLKDEV_FRONTEND
+ tristate "Xen virtual block device support"
+ depends on XEN
+ default y
+ help
+ This driver implements the front-end of the Xen virtual
+ block device driver. It communicates with a back-end driver
+ in another domain which drives the actual block device.
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 9ee08ab4ffa..3e31532df0e 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -29,3 +29,4 @@ obj-$(CONFIG_VIODASD) += viodasd.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_BLK_DEV_UB) += ub.o
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 0f5e3caf85d..2288b55d916 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -45,8 +45,6 @@ struct vdc_req_entry {
struct vdc_port {
struct vio_driver_state vio;
- struct vdc *vp;
-
struct gendisk *disk;
struct vdc_completion *cmp;
@@ -72,8 +70,6 @@ struct vdc_port {
struct vio_disk_geom geom;
struct vio_disk_vtoc label;
-
- struct list_head list;
};
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -81,15 +77,6 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
return container_of(vio, struct vdc_port, vio);
}
-struct vdc {
- /* Protects prot_list. */
- spinlock_t lock;
-
- struct vio_dev *dev;
-
- struct list_head port_list;
-};
-
/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
{ .major = 1, .minor = 0 },
@@ -747,21 +734,23 @@ static struct vio_driver_ops vdc_vio_ops = {
.handshake_complete = vdc_handshake_complete,
};
+static void print_version(void)
+{
+ static int version_printed;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
static int __devinit vdc_port_probe(struct vio_dev *vdev,
const struct vio_device_id *id)
{
struct mdesc_handle *hp;
struct vdc_port *port;
- unsigned long flags;
- struct vdc *vp;
const u64 *port_id;
int err;
- vp = dev_get_drvdata(vdev->dev.parent);
- if (!vp) {
- printk(KERN_ERR PFX "Cannot find port parent vdc.\n");
- return -ENODEV;
- }
+ print_version();
hp = mdesc_grab();
@@ -783,7 +772,6 @@ static int __devinit vdc_port_probe(struct vio_dev *vdev,
goto err_out_release_mdesc;
}
- port->vp = vp;
port->dev_no = *port_id;
if (port->dev_no >= 26)
@@ -818,12 +806,6 @@ static int __devinit vdc_port_probe(struct vio_dev *vdev,
if (err)
goto err_out_free_tx_ring;
- INIT_LIST_HEAD(&port->list);
-
- spin_lock_irqsave(&vp->lock, flags);
- list_add(&port->list, &vp->port_list);
- spin_unlock_irqrestore(&vp->lock, flags);
-
dev_set_drvdata(&vdev->dev, port);
mdesc_release(hp);
@@ -879,58 +861,6 @@ static struct vio_driver vdc_port_driver = {
}
};
-static int __devinit vdc_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
-{
- static int vdc_version_printed;
- struct vdc *vp;
-
- if (vdc_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
- vp = kzalloc(sizeof(struct vdc), GFP_KERNEL);
- if (!vp)
- return -ENOMEM;
-
- spin_lock_init(&vp->lock);
- vp->dev = vdev;
- INIT_LIST_HEAD(&vp->port_list);
-
- dev_set_drvdata(&vdev->dev, vp);
-
- return 0;
-}
-
-static int vdc_remove(struct vio_dev *vdev)
-{
-
- struct vdc *vp = dev_get_drvdata(&vdev->dev);
-
- if (vp) {
- kfree(vp);
- dev_set_drvdata(&vdev->dev, NULL);
- }
- return 0;
-}
-
-static struct vio_device_id vdc_match[] = {
- {
- .type = "block",
- },
- {},
-};
-MODULE_DEVICE_TABLE(vio, vdc_match);
-
-static struct vio_driver vdc_driver = {
- .id_table = vdc_match,
- .probe = vdc_probe,
- .remove = vdc_remove,
- .driver = {
- .name = "vdc",
- .owner = THIS_MODULE,
- }
-};
-
static int __init vdc_init(void)
{
int err;
@@ -940,19 +870,13 @@ static int __init vdc_init(void)
goto out_err;
vdc_major = err;
- err = vio_register_driver(&vdc_driver);
- if (err)
- goto out_unregister_blkdev;
err = vio_register_driver(&vdc_port_driver);
if (err)
- goto out_unregister_vdc;
+ goto out_unregister_blkdev;
return 0;
-out_unregister_vdc:
- vio_unregister_driver(&vdc_driver);
-
out_unregister_blkdev:
unregister_blkdev(vdc_major, VDCBLK_NAME);
vdc_major = 0;
@@ -964,7 +888,6 @@ out_err:
static void __exit vdc_exit(void)
{
vio_unregister_driver(&vdc_port_driver);
- vio_unregister_driver(&vdc_driver);
unregister_blkdev(vdc_major, VDCBLK_NAME);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
new file mode 100644
index 00000000000..6746c29181f
--- /dev/null
+++ b/drivers/block/xen-blkfront.c
@@ -0,0 +1,988 @@
+/*
+ * blkfront.c
+ *
+ * XenLinux virtual block device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
+ * Copyright (c) 2004, Andrew Warfield
+ * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/blkif.h>
+
+#include <asm/xen/hypervisor.h>
+
+enum blkif_state {
+ BLKIF_STATE_DISCONNECTED,
+ BLKIF_STATE_CONNECTED,
+ BLKIF_STATE_SUSPENDED,
+};
+
+struct blk_shadow {
+ struct blkif_request req;
+ unsigned long request;
+ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+
+static struct block_device_operations xlvbd_block_fops;
+
+#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+
+/*
+ * We have one of these per vbd, whether ide, scsi or 'other'. They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+struct blkfront_info
+{
+ struct xenbus_device *xbdev;
+ dev_t dev;
+ struct gendisk *gd;
+ int vdevice;
+ blkif_vdev_t handle;
+ enum blkif_state connected;
+ int ring_ref;
+ struct blkif_front_ring ring;
+ unsigned int evtchn, irq;
+ struct request_queue *rq;
+ struct work_struct work;
+ struct gnttab_free_callback callback;
+ struct blk_shadow shadow[BLK_RING_SIZE];
+ unsigned long shadow_free;
+ int feature_barrier;
+
+ /**
+ * The number of people holding this device open. We won't allow a
+ * hot-unplug unless this is 0.
+ */
+ int users;
+};
+
+static DEFINE_SPINLOCK(blkif_io_lock);
+
+#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
+#define GRANT_INVALID_REF 0
+
+#define PARTS_PER_DISK 16
+
+#define BLKIF_MAJOR(dev) ((dev)>>8)
+#define BLKIF_MINOR(dev) ((dev) & 0xff)
+
+#define DEV_NAME "xvd" /* name in /dev */
+
+/* Information about our VBDs. */
+#define MAX_VBDS 64
+static LIST_HEAD(vbds_list);
+
+static int get_id_from_freelist(struct blkfront_info *info)
+{
+ unsigned long free = info->shadow_free;
+ BUG_ON(free > BLK_RING_SIZE);
+ info->shadow_free = info->shadow[free].req.id;
+ info->shadow[free].req.id = 0x0fffffee; /* debug */
+ return free;
+}
+
+static void add_id_to_freelist(struct blkfront_info *info,
+ unsigned long id)
+{
+ info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].request = 0;
+ info->shadow_free = id;
+}
+
+static void blkif_restart_queue_callback(void *arg)
+{
+ struct blkfront_info *info = (struct blkfront_info *)arg;
+ schedule_work(&info->work);
+}
+
+/*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. this should be a
+ * virtual address in the guest os.
+ */
+static int blkif_queue_request(struct request *req)
+{
+ struct blkfront_info *info = req->rq_disk->private_data;
+ unsigned long buffer_mfn;
+ struct blkif_request *ring_req;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int idx;
+ unsigned long id;
+ unsigned int fsect, lsect;
+ int ref;
+ grant_ref_t gref_head;
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+ return 1;
+
+ if (gnttab_alloc_grant_references(
+ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
+ gnttab_request_free_callback(
+ &info->callback,
+ blkif_restart_queue_callback,
+ info,
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ return 1;
+ }
+
+ /* Fill out a communications ring structure. */
+ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ id = get_id_from_freelist(info);
+ info->shadow[id].request = (unsigned long)req;
+
+ ring_req->id = id;
+ ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->handle = info->handle;
+
+ ring_req->operation = rq_data_dir(req) ?
+ BLKIF_OP_WRITE : BLKIF_OP_READ;
+ if (blk_barrier_rq(req))
+ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+
+ ring_req->nr_segments = 0;
+ rq_for_each_bio (bio, req) {
+ bio_for_each_segment (bvec, bio, idx) {
+ BUG_ON(ring_req->nr_segments
+ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnttab_grant_foreign_access_ref(
+ ref,
+ info->xbdev->otherend_id,
+ buffer_mfn,
+ rq_data_dir(req) );
+
+ info->shadow[id].frame[ring_req->nr_segments] =
+ mfn_to_pfn(buffer_mfn);
+
+ ring_req->seg[ring_req->nr_segments] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+
+ ring_req->nr_segments++;
+ }
+ }
+
+ info->ring.req_prod_pvt++;
+
+ /* Keep a private copy so we can reissue requests when recovering. */
+ info->shadow[id].req = *ring_req;
+
+ gnttab_free_grant_references(gref_head);
+
+ return 0;
+}
+
+
+static inline void flush_requests(struct blkfront_info *info)
+{
+ int notify;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
+
+ if (notify)
+ notify_remote_via_irq(info->irq);
+}
+
+/*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+static void do_blkif_request(request_queue_t *rq)
+{
+ struct blkfront_info *info = NULL;
+ struct request *req;
+ int queued;
+
+ pr_debug("Entered do_blkif_request\n");
+
+ queued = 0;
+
+ while ((req = elv_next_request(rq)) != NULL) {
+ info = req->rq_disk->private_data;
+ if (!blk_fs_request(req)) {
+ end_request(req, 0);
+ continue;
+ }
+
+ if (RING_FULL(&info->ring))
+ goto wait;
+
+ pr_debug("do_blk_req %p: cmd %p, sec %lx, "
+ "(%u/%li) buffer:%p [%s]\n",
+ req, req->cmd, (unsigned long)req->sector,
+ req->current_nr_sectors,
+ req->nr_sectors, req->buffer,
+ rq_data_dir(req) ? "write" : "read");
+
+
+ blkdev_dequeue_request(req);
+ if (blkif_queue_request(req)) {
+ blk_requeue_request(rq, req);
+wait:
+ /* Avoid pointless unplugs. */
+ blk_stop_queue(rq);
+ break;
+ }
+
+ queued++;
+ }
+
+ if (queued != 0)
+ flush_requests(info);
+}
+
+static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+{
+ request_queue_t *rq;
+
+ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ if (rq == NULL)
+ return -1;
+
+ elevator_init(rq, "noop");
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_hardsect_size(rq, sector_size);
+ blk_queue_max_sectors(rq, 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ gd->queue = rq;
+
+ return 0;
+}
+
+
+static int xlvbd_barrier(struct blkfront_info *info)
+{
+ int err;
+
+ err = blk_queue_ordered(info->rq,
+ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
+ NULL);
+
+ if (err)
+ return err;
+
+ printk(KERN_INFO "blkfront: %s: barriers %s\n",
+ info->gd->disk_name,
+ info->feature_barrier ? "enabled" : "disabled");
+ return 0;
+}
+
+
+static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
+ int vdevice, u16 vdisk_info, u16 sector_size,
+ struct blkfront_info *info)
+{
+ struct gendisk *gd;
+ int nr_minors = 1;
+ int err = -ENODEV;
+
+ BUG_ON(info->gd != NULL);
+ BUG_ON(info->rq != NULL);
+
+ if ((minor % PARTS_PER_DISK) == 0)
+ nr_minors = PARTS_PER_DISK;
+
+ gd = alloc_disk(nr_minors);
+ if (gd == NULL)
+ goto out;
+
+ if (nr_minors > 1)
+ sprintf(gd->disk_name, "%s%c", DEV_NAME,
+ 'a' + minor / PARTS_PER_DISK);
+ else
+ sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
+ 'a' + minor / PARTS_PER_DISK,
+ minor % PARTS_PER_DISK);
+
+ gd->major = XENVBD_MAJOR;
+ gd->first_minor = minor;
+ gd->fops = &xlvbd_block_fops;
+ gd->private_data = info;
+ gd->driverfs_dev = &(info->xbdev->dev);
+ set_capacity(gd, capacity);
+
+ if (xlvbd_init_blk_queue(gd, sector_size)) {
+ del_gendisk(gd);
+ goto out;
+ }
+
+ info->rq = gd->queue;
+ info->gd = gd;
+
+ if (info->feature_barrier)
+ xlvbd_barrier(info);
+
+ if (vdisk_info & VDISK_READONLY)
+ set_disk_ro(gd, 1);
+
+ if (vdisk_info & VDISK_REMOVABLE)
+ gd->flags |= GENHD_FL_REMOVABLE;
+
+ if (vdisk_info & VDISK_CDROM)
+ gd->flags |= GENHD_FL_CD;
+
+ return 0;
+
+ out:
+ return err;
+}
+
+static void kick_pending_request_queues(struct blkfront_info *info)
+{
+ if (!RING_FULL(&info->ring)) {
+ /* Re-enable calldowns. */
+ blk_start_queue(info->rq);
+ /* Kick things off immediately. */
+ do_blkif_request(info->rq);
+ }
+}
+
+static void blkif_restart_queue(struct work_struct *work)
+{
+ struct blkfront_info *info = container_of(work, struct blkfront_info, work);
+
+ spin_lock_irq(&blkif_io_lock);
+ if (info->connected == BLKIF_STATE_CONNECTED)
+ kick_pending_request_queues(info);
+ spin_unlock_irq(&blkif_io_lock);
+}
+
+static void blkif_free(struct blkfront_info *info, int suspend)
+{
+ /* Prevent new requests being issued until we fix things up. */
+ spin_lock_irq(&blkif_io_lock);
+ info->connected = suspend ?
+ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
+ /* No more blkif_request(). */
+ if (info->rq)
+ blk_stop_queue(info->rq);
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irq(&blkif_io_lock);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ /* Free resources associated with old device channel. */
+ if (info->ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->ring_ref, 0,
+ (unsigned long)info->ring.sring);
+ info->ring_ref = GRANT_INVALID_REF;
+ info->ring.sring = NULL;
+ }
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, info);
+ info->evtchn = info->irq = 0;
+
+}
+
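+/*
+ * Drop the grant references that were handed to the backend for each
+ * segment of a completed request, so the pages behind them can safely
+ * be reused.
+ */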
+static void blkif_completion(struct blk_shadow *s)
+{
+ int i;
+ for (i = 0; i < s->req.nr_segments; i++)
+ gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+}
+
+static irqreturn_t blkif_interrupt(int irq, void *dev_id)
+{
+ struct request *req;
+ struct blkif_response *bret;
+ RING_IDX i, rp;
+ unsigned long flags;
+ struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int uptodate;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ again:
+ rp = info->ring.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for (i = info->ring.rsp_cons; i != rp; i++) {
+ unsigned long id;
+ int ret;
+
+ bret = RING_GET_RESPONSE(&info->ring, i);
+ id = bret->id;
+ req = (struct request *)info->shadow[id].request;
+
+ blkif_completion(&info->shadow[id]);
+
+ add_id_to_freelist(info, id);
+
+ uptodate = (bret->status == BLKIF_RSP_OKAY);
+ switch (bret->operation) {
+ case BLKIF_OP_WRITE_BARRIER:
+ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
+ info->gd->disk_name);
+ uptodate = -EOPNOTSUPP;
+ info->feature_barrier = 0;
+ xlvbd_barrier(info);
+ }
+ /* fall through */
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if (unlikely(bret->status != BLKIF_RSP_OKAY))
+ dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
+ "request: %x\n", bret->status);
+
+ ret = end_that_request_first(req, uptodate,
+ req->hard_nr_sectors);
+ BUG_ON(ret);
+ end_that_request_last(req, uptodate);
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ info->ring.rsp_cons = i;
+
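+	/*
+	 * If requests are still outstanding, RING_FINAL_CHECK_FOR_RESPONSES()
+	 * looks for responses that raced in while we were processing and
+	 * re-arms rsp_event; otherwise simply ask to be notified again when
+	 * the next response is produced.
+	 */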
+ if (i != info->ring.req_prod_pvt) {
+ int more_to_do;
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
+ if (more_to_do)
+ goto again;
+ } else
+ info->ring.sring->rsp_event = i + 1;
+
+ kick_pending_request_queues(info);
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+
+static int setup_blkring(struct xenbus_device *dev,
+ struct blkfront_info *info)
+{
+ struct blkif_sring *sring;
+ int err;
+
+ info->ring_ref = GRANT_INVALID_REF;
+
+ sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
+ if (!sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+ return -ENOMEM;
+ }
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+ if (err < 0) {
+ free_page((unsigned long)sring);
+ info->ring.sring = NULL;
+ goto fail;
+ }
+ info->ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err)
+ goto fail;
+
+ err = bind_evtchn_to_irqhandler(info->evtchn,
+ blkif_interrupt,
+ IRQF_SAMPLE_RANDOM, "blkif", info);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err,
+ "bind_evtchn_to_irqhandler failed");
+ goto fail;
+ }
+ info->irq = err;
+
+ return 0;
+fail:
+ blkif_free(info, 0);
+ return err;
+}
+
+
+/* Common code used when first setting up, and when resuming. */
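+/*
+ * The frontend advertises its shared ring grant reference and event
+ * channel by writing "ring-ref" and "event-channel" under its own
+ * xenstore node inside a transaction; -EAGAIN from
+ * xenbus_transaction_end() means the transaction raced with another
+ * update and is simply retried.
+ */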
+static int talk_to_backend(struct xenbus_device *dev,
+ struct blkfront_info *info)
+{
+ const char *message = NULL;
+ struct xenbus_transaction xbt;
+ int err;
+
+ /* Create shared ring, alloc event channel. */
+ err = setup_blkring(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_blkring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename,
+ "ring-ref", "%u", info->ring_ref);
+ if (err) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_blkring;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_blkring:
+ blkif_free(info, 0);
+ out:
+ return err;
+}
+
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffer for communication with the backend, and
+ * inform the backend of the appropriate details for those. Switch to
+ * Initialised state.
+ */
+static int blkfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err, vdevice, i;
+ struct blkfront_info *info;
+
+ /* FIXME: Use dynamic device id if this is not set. */
+ err = xenbus_scanf(XBT_NIL, dev->nodename,
+ "virtual-device", "%i", &vdevice);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading virtual-device");
+ return err;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
+ return -ENOMEM;
+ }
+
+ info->xbdev = dev;
+ info->vdevice = vdevice;
+ info->connected = BLKIF_STATE_DISCONNECTED;
+ INIT_WORK(&info->work, blkif_restart_queue);
+
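+	/*
+	 * The shadow entries double as a free list: each free entry's req.id
+	 * holds the index of the next free entry, with 0x0fffffff as the end
+	 * marker, so get_id_from_freelist()/add_id_to_freelist() can hand out
+	 * and recycle request ids cheaply.
+	 */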
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Front end dir is a number, which is used as the id. */
+ info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
+ dev->dev.driver_data = info;
+
+ err = talk_to_backend(dev, info);
+ if (err) {
+ kfree(info);
+ dev->dev.driver_data = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+
+static int blkif_recover(struct blkfront_info *info)
+{
+ int i;
+ struct blkif_request *req;
+ struct blk_shadow *copy;
+ int j;
+
+ /* Stage 1: Make a safe copy of the shadow state. */
+ copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ memcpy(copy, info->shadow, sizeof(info->shadow));
+
+ /* Stage 2: Set up free list. */
+ memset(&info->shadow, 0, sizeof(info->shadow));
+ for (i = 0; i < BLK_RING_SIZE; i++)
+ info->shadow[i].req.id = i+1;
+ info->shadow_free = info->ring.req_prod_pvt;
+ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+ /* Stage 3: Find pending requests and requeue them. */
+ for (i = 0; i < BLK_RING_SIZE; i++) {
+ /* Not in use? */
+ if (copy[i].request == 0)
+ continue;
+
+ /* Grab a request slot and copy shadow state into it. */
+ req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ *req = copy[i].req;
+
+ /* We get a new request id, and must reset the shadow state. */
+ req->id = get_id_from_freelist(info);
+ memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+
+ /* Rewrite any grant references invalidated by susp/resume. */
+ for (j = 0; j < req->nr_segments; j++)
+ gnttab_grant_foreign_access_ref(
+ req->seg[j].gref,
+ info->xbdev->otherend_id,
+ pfn_to_mfn(info->shadow[req->id].frame[j]),
+ rq_data_dir(
+ (struct request *)
+ info->shadow[req->id].request));
+ info->shadow[req->id].req = *req;
+
+ info->ring.req_prod_pvt++;
+ }
+
+ kfree(copy);
+
+ xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+ spin_lock_irq(&blkif_io_lock);
+
+ /* Now safe for us to use the shared ring */
+ info->connected = BLKIF_STATE_CONNECTED;
+
+ /* Send off requeued requests */
+ flush_requests(info);
+
+ /* Kick any other new requests queued since we resumed */
+ kick_pending_request_queues(info);
+
+ spin_unlock_irq(&blkif_io_lock);
+
+ return 0;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart. We tear down our blkif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int blkfront_resume(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ int err;
+
+ dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+
+ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+
+ err = talk_to_backend(dev, info);
+ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
+ err = blkif_recover(info);
+
+ return err;
+}
+
+
+/*
+ * Invoked when the backend is finally 'ready' (and has told us the
+ * details about the physical device - #sectors, size, etc).
+ */
+static void blkfront_connect(struct blkfront_info *info)
+{
+ unsigned long long sectors;
+ unsigned long sector_size;
+ unsigned int binfo;
+ int err;
+
+ if ((info->connected == BLKIF_STATE_CONNECTED) ||
+ (info->connected == BLKIF_STATE_SUSPENDED) )
+ return;
+
+ dev_dbg(&info->xbdev->dev, "%s:%s.\n",
+ __func__, info->xbdev->otherend);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "sectors", "%llu", &sectors,
+ "info", "%u", &binfo,
+ "sector-size", "%lu", &sector_size,
+ NULL);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err,
+ "reading backend fields at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%lu", &info->feature_barrier,
+ NULL);
+ if (err)
+ info->feature_barrier = 0;
+
+ err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
+ sectors, info->vdevice,
+ binfo, sector_size, info);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+ /* Kick pending requests. */
+ spin_lock_irq(&blkif_io_lock);
+ info->connected = BLKIF_STATE_CONNECTED;
+ kick_pending_request_queues(info);
+ spin_unlock_irq(&blkif_io_lock);
+
+ add_disk(info->gd);
+}
+
+/**
+ * Handle the change of state of the backend to Closing. We must delete our
+ * device-layer structures now, to ensure that writes are flushed through to
+ * the backend. Once this is done, we can switch to Closed in
+ * acknowledgement.
+ */
+static void blkfront_closing(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ unsigned long flags;
+
+ dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
+
+ if (info->rq == NULL)
+ goto out;
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ del_gendisk(info->gd);
+
+ /* No more blkif_request(). */
+ blk_stop_queue(info->rq);
+
+ /* No more gnttab callback work. */
+ gnttab_cancel_free_callback(&info->callback);
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ /* Flush gnttab callback work. Must be done with no locks held. */
+ flush_scheduled_work();
+
+ blk_cleanup_queue(info->rq);
+ info->rq = NULL;
+
+ out:
+ xenbus_frontend_closed(dev);
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+ struct block_device *bd;
+
+ dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateConnected:
+ blkfront_connect(info);
+ break;
+
+ case XenbusStateClosing:
+ bd = bdget(info->dev);
+ if (bd == NULL)
+ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
+
+ mutex_lock(&bd->bd_mutex);
+ if (info->users > 0)
+ xenbus_dev_error(dev, -EBUSY,
+ "Device in use; refusing to close");
+ else
+ blkfront_closing(dev);
+ mutex_unlock(&bd->bd_mutex);
+ bdput(bd);
+ break;
+ }
+}
+
+static int blkfront_remove(struct xenbus_device *dev)
+{
+ struct blkfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+
+ blkif_free(info, 0);
+
+ kfree(info);
+
+ return 0;
+}
+
+static int blkif_open(struct inode *inode, struct file *filep)
+{
+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ info->users++;
+ return 0;
+}
+
+static int blkif_release(struct inode *inode, struct file *filep)
+{
+ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
+ info->users--;
+ if (info->users == 0) {
+ /* Check whether we have been instructed to close. We will
+ have ignored this request initially, as the device was
+ still mounted. */
+ struct xenbus_device *dev = info->xbdev;
+ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
+
+ if (state == XenbusStateClosing)
+ blkfront_closing(dev);
+ }
+ return 0;
+}
+
+static struct block_device_operations xlvbd_block_fops =
+{
+ .owner = THIS_MODULE,
+ .open = blkif_open,
+ .release = blkif_release,
+};
+
+
+static struct xenbus_device_id blkfront_ids[] = {
+ { "vbd" },
+ { "" }
+};
+
+static struct xenbus_driver blkfront = {
+ .name = "vbd",
+ .owner = THIS_MODULE,
+ .ids = blkfront_ids,
+ .probe = blkfront_probe,
+ .remove = blkfront_remove,
+ .resume = blkfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init xlblk_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+ printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
+ XENVBD_MAJOR, DEV_NAME);
+ return -ENODEV;
+ }
+
+ return xenbus_register_frontend(&blkfront);
+}
+module_init(xlblk_init);
+
+
+static void xlblk_exit(void)
+{
+ return xenbus_unregister_driver(&blkfront);
+}
+module_exit(xlblk_exit);
+
+MODULE_DESCRIPTION("Xen virtual block device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 97bd71bc3ae..9e8f21410d2 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -604,6 +604,14 @@ config HVC_BEAT
help
Toshiba's Cell Reference Set Beat Console device driver
+config HVC_XEN
+ bool "Xen Hypervisor Console support"
+ depends on XEN
+ select HVC_DRIVER
+ default y
+ help
+ Xen virtual console device driver
+
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
depends on PPC_PSERIES
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f2996a95eb0..8852b8d643c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
+obj-$(CONFIG_HVC_XEN) += hvc_xen.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MSPEC) += mspec.o
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
new file mode 100644
index 00000000000..dd68f8541c2
--- /dev/null
+++ b/drivers/char/hvc_xen.c
@@ -0,0 +1,159 @@
+/*
+ * xen console driver interface to hvc_console.c
+ *
+ * (c) 2007 Gerd Hoffmann <kraxel@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/xen/hypervisor.h>
+#include <xen/page.h>
+#include <xen/events.h>
+#include <xen/interface/io/console.h>
+#include <xen/hvc-console.h>
+
+#include "hvc_console.h"
+
+#define HVC_COOKIE 0x58656e /* "Xen" in hex */
+
+static struct hvc_struct *hvc;
+static int xencons_irq;
+
+/* ------------------------------------------------------------------ */
+
+static inline struct xencons_interface *xencons_interface(void)
+{
+ return mfn_to_virt(xen_start_info->console.domU.mfn);
+}
+
+static inline void notify_daemon(void)
+{
+ /* Use evtchn: this is called early, before irq is set up. */
+ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
+}
+
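+/*
+ * The console page holds two fixed-size rings ("in" and "out") shared
+ * with the console backend.  The cons/prod indices are free-running, so
+ * prod - cons is the number of bytes in flight and MASK_XENCONS_IDX()
+ * reduces an index to an offset within the ring buffer.
+ */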
+static int write_console(uint32_t vtermno, const char *data, int len)
+{
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+ int sent = 0;
+
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ mb(); /* update queue values before going on */
+ BUG_ON((prod - cons) > sizeof(intf->out));
+
+ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
+ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
+
+ wmb(); /* write ring before updating pointer */
+ intf->out_prod = prod;
+
+ notify_daemon();
+ return sent;
+}
+
+static int read_console(uint32_t vtermno, char *buf, int len)
+{
+ struct xencons_interface *intf = xencons_interface();
+ XENCONS_RING_IDX cons, prod;
+ int recv = 0;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ mb(); /* get pointers before reading ring */
+ BUG_ON((prod - cons) > sizeof(intf->in));
+
+ while (cons != prod && recv < len)
+ buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
+
+ mb(); /* read ring before consuming */
+ intf->in_cons = cons;
+
+ notify_daemon();
+ return recv;
+}
+
+static struct hv_ops hvc_ops = {
+ .get_chars = read_console,
+ .put_chars = write_console,
+};
+
+static int __init xen_init(void)
+{
+ struct hvc_struct *hp;
+
+ if (!is_running_on_xen())
+ return 0;
+
+ xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
+ if (xencons_irq < 0)
+ xencons_irq = 0 /* NO_IRQ */;
+ hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+
+ hvc = hp;
+ return 0;
+}
+
+static void __exit xen_fini(void)
+{
+ if (hvc)
+ hvc_remove(hvc);
+}
+
+static int xen_cons_init(void)
+{
+ if (!is_running_on_xen())
+ return 0;
+
+ hvc_instantiate(HVC_COOKIE, 0, &hvc_ops);
+ return 0;
+}
+
+module_init(xen_init);
+module_exit(xen_fini);
+console_initcall(xen_cons_init);
+
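+/*
+ * Raw early-boot console: write the buffer a line at a time, emitting an
+ * explicit CR-LF after each newline so the output stays readable on the
+ * Xen console.
+ */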
+static void xenboot_write_console(struct console *console, const char *string,
+ unsigned len)
+{
+ unsigned int linelen, off = 0;
+ const char *pos;
+
+ while (off < len && NULL != (pos = strchr(string+off, '\n'))) {
+ linelen = pos-string-off;
+ if (off + linelen > len)
+ break;
+ write_console(0, string+off, linelen);
+ write_console(0, "\r\n", 2);
+ off += linelen + 1;
+ }
+ if (off < len)
+ write_console(0, string+off, len-off);
+}
+
+struct console xenboot_console = {
+ .name = "xenboot",
+ .write = xenboot_write_console,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+};
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index dbb22403979..3d90fc00209 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -1770,7 +1770,8 @@ static int call_critical_overtemp(void)
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL };
- return call_usermodehelper(critical_overtemp_path, argv, envp, 0);
+ return call_usermodehelper(critical_overtemp_path,
+ argv, envp, UMH_WAIT_EXEC);
}
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index e18d265d5d3..516d943227e 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -80,7 +80,8 @@ int wf_critical_overtemp(void)
"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
NULL };
- return call_usermodehelper(critical_overtemp_path, argv, envp, 0);
+ return call_usermodehelper(critical_overtemp_path,
+ argv, envp, UMH_WAIT_EXEC);
}
EXPORT_SYMBOL_GPL(wf_critical_overtemp);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 555d594d181..1cb22bfae75 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -33,6 +33,7 @@
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/stat.h>
+#include <linux/log2.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
@@ -369,7 +370,7 @@ static int attach_by_scanning(struct ubi_device *ubi)
out_wl:
ubi_wl_close(ubi);
out_vtbl:
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
out_si:
ubi_scan_destroy_si(si);
return err;
@@ -422,8 +423,7 @@ static int io_init(struct ubi_device *ubi)
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
/* Make sure minimal I/O unit is power of 2 */
- if (ubi->min_io_size == 0 ||
- (ubi->min_io_size & (ubi->min_io_size - 1))) {
+ if (!is_power_of_2(ubi->min_io_size)) {
ubi_err("bad min. I/O unit");
return -EINVAL;
}
@@ -593,8 +593,6 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
if (err)
goto out_detach;
- ubi_devices_cnt += 1;
-
ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
ubi_msg("MTD device name: \"%s\"", ubi->mtd->name);
ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
@@ -624,12 +622,13 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
wake_up_process(ubi->bgt_thread);
}
+ ubi_devices_cnt += 1;
return 0;
out_detach:
ubi_eba_close(ubi);
ubi_wl_close(ubi);
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
out_free:
kfree(ubi);
out_mtd:
@@ -650,7 +649,7 @@ static void detach_mtd_dev(struct ubi_device *ubi)
uif_close(ubi);
ubi_eba_close(ubi);
ubi_wl_close(ubi);
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
kfree(ubi_devices[ubi_num]);
ubi_devices[ubi_num] = NULL;
@@ -686,13 +685,6 @@ static int __init ubi_init(void)
struct mtd_dev_param *p = &mtd_dev_param[i];
cond_resched();
-
- if (!p->name) {
- dbg_err("empty name");
- err = -EINVAL;
- goto out_detach;
- }
-
err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
if (err)
goto out_detach;
@@ -799,7 +791,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
/* Get rid of the final newline */
if (buf[len - 1] == '\n')
- buf[len - 1] = 0;
+ buf[len - 1] = '\0';
for (i = 0; i < 3; i++)
tokens[i] = strsep(&pbuf, ",");
@@ -809,9 +801,6 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
return -EINVAL;
}
- if (tokens[0] == '\0')
- return -EINVAL;
-
p = &mtd_dev_param[mtd_devs];
strcpy(&p->name[0], tokens[0]);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 6612eb79bf1..fe4da1e96c5 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -64,6 +64,7 @@ static struct ubi_device *major_to_device(int major)
if (ubi_devices[i] && ubi_devices[i]->major == major)
return ubi_devices[i];
BUG();
+ return NULL;
}
/**
@@ -153,7 +154,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
ubi_warn("update of volume %d not finished, volume is damaged",
vol->vol_id);
vol->updating = 0;
- kfree(vol->upd_buf);
+ vfree(vol->upd_buf);
}
ubi_close_volume(desc);
@@ -232,7 +233,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
- tbuf = kmalloc(tbuf_size, GFP_KERNEL);
+ tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
@@ -271,7 +272,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
len = count > tbuf_size ? tbuf_size : count;
} while (count);
- kfree(tbuf);
+ vfree(tbuf);
return err ? err : count_save - count;
}
@@ -320,7 +321,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
tbuf_size = vol->usable_leb_size;
if (count < tbuf_size)
tbuf_size = ALIGN(count, ubi->min_io_size);
- tbuf = kmalloc(tbuf_size, GFP_KERNEL);
+ tbuf = vmalloc(tbuf_size);
if (!tbuf)
return -ENOMEM;
@@ -355,7 +356,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
len = count > tbuf_size ? tbuf_size : count;
}
- kfree(tbuf);
+ vfree(tbuf);
return err ? err : count_save - count;
}
@@ -397,6 +398,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
vol->corrupted = 1;
}
vol->checked = 1;
+ ubi_gluebi_updated(vol);
revoke_exclusive(desc, UBI_READWRITE);
}
@@ -413,19 +415,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_device *ubi = vol->ubi;
void __user *argp = (void __user *)arg;
- if (_IOC_NR(cmd) > VOL_CDEV_IOC_MAX_SEQ ||
- _IOC_TYPE(cmd) != UBI_VOL_IOC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_DIR(cmd) && _IOC_READ)
- err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) && _IOC_WRITE)
- err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
switch (cmd) {
-
/* Volume update command */
case UBI_IOCVOLUP:
{
@@ -471,7 +461,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
{
int32_t lnum;
- err = __get_user(lnum, (__user int32_t *)argp);
+ err = get_user(lnum, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
@@ -587,17 +577,6 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_volume_desc *desc;
void __user *argp = (void __user *)arg;
- if (_IOC_NR(cmd) > UBI_CDEV_IOC_MAX_SEQ ||
- _IOC_TYPE(cmd) != UBI_IOC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_DIR(cmd) && _IOC_READ)
- err = !access_ok(VERIFY_WRITE, argp, _IOC_SIZE(cmd));
- else if (_IOC_DIR(cmd) && _IOC_WRITE)
- err = !access_ok(VERIFY_READ, argp, _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
@@ -612,7 +591,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_mkvol_req req;
dbg_msg("create volume");
- err = __copy_from_user(&req, argp,
+ err = copy_from_user(&req, argp,
sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
@@ -629,7 +608,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
if (err)
break;
- err = __put_user(req.vol_id, (__user int32_t *)argp);
+ err = put_user(req.vol_id, (__user int32_t *)argp);
if (err)
err = -EFAULT;
@@ -642,7 +621,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
int vol_id;
dbg_msg("remove volume");
- err = __get_user(vol_id, (__user int32_t *)argp);
+ err = get_user(vol_id, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
break;
@@ -669,7 +648,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_rsvol_req req;
dbg_msg("re-size volume");
- err = __copy_from_user(&req, argp,
+ err = copy_from_user(&req, argp,
sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
@@ -707,7 +686,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
.ioctl = ubi_cdev_ioctl,
- .llseek = no_llseek
+ .llseek = no_llseek,
};
/* UBI volume character device operations */
@@ -718,5 +697,5 @@ struct file_operations ubi_vol_cdev_operations = {
.llseek = vol_cdev_llseek,
.read = vol_cdev_read,
.write = vol_cdev_write,
- .ioctl = vol_cdev_ioctl
+ .ioctl = vol_cdev_ioctl,
};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 86364221faf..310341e5cd4 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -35,12 +35,12 @@
void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
dbg_msg("erase counter header dump:");
- dbg_msg("magic %#08x", ubi32_to_cpu(ec_hdr->magic));
+ dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic));
dbg_msg("version %d", (int)ec_hdr->version);
- dbg_msg("ec %llu", (long long)ubi64_to_cpu(ec_hdr->ec));
- dbg_msg("vid_hdr_offset %d", ubi32_to_cpu(ec_hdr->vid_hdr_offset));
- dbg_msg("data_offset %d", ubi32_to_cpu(ec_hdr->data_offset));
- dbg_msg("hdr_crc %#08x", ubi32_to_cpu(ec_hdr->hdr_crc));
+ dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec));
+ dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset));
+ dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc));
dbg_msg("erase counter header hexdump:");
ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE);
}
@@ -52,20 +52,20 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
dbg_msg("volume identifier header dump:");
- dbg_msg("magic %08x", ubi32_to_cpu(vid_hdr->magic));
+ dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic));
dbg_msg("version %d", (int)vid_hdr->version);
dbg_msg("vol_type %d", (int)vid_hdr->vol_type);
dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag);
dbg_msg("compat %d", (int)vid_hdr->compat);
- dbg_msg("vol_id %d", ubi32_to_cpu(vid_hdr->vol_id));
- dbg_msg("lnum %d", ubi32_to_cpu(vid_hdr->lnum));
- dbg_msg("leb_ver %u", ubi32_to_cpu(vid_hdr->leb_ver));
- dbg_msg("data_size %d", ubi32_to_cpu(vid_hdr->data_size));
- dbg_msg("used_ebs %d", ubi32_to_cpu(vid_hdr->used_ebs));
- dbg_msg("data_pad %d", ubi32_to_cpu(vid_hdr->data_pad));
+ dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id));
+ dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum));
+ dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver));
+ dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size));
+ dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs));
+ dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad));
dbg_msg("sqnum %llu",
- (unsigned long long)ubi64_to_cpu(vid_hdr->sqnum));
- dbg_msg("hdr_crc %08x", ubi32_to_cpu(vid_hdr->hdr_crc));
+ (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+ dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc));
dbg_msg("volume identifier header hexdump:");
}
@@ -91,7 +91,7 @@ void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- dbg_msg("name %s", vol->name);
+ dbg_msg("name %s", vol->name);
} else {
dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c",
vol->name[0], vol->name[1], vol->name[2],
@@ -106,30 +106,30 @@ void ubi_dbg_dump_vol_info(const struct ubi_volume *vol)
*/
void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
- int name_len = ubi16_to_cpu(r->name_len);
+ int name_len = be16_to_cpu(r->name_len);
dbg_msg("volume table record %d dump:", idx);
- dbg_msg("reserved_pebs %d", ubi32_to_cpu(r->reserved_pebs));
- dbg_msg("alignment %d", ubi32_to_cpu(r->alignment));
- dbg_msg("data_pad %d", ubi32_to_cpu(r->data_pad));
+ dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs));
+ dbg_msg("alignment %d", be32_to_cpu(r->alignment));
+ dbg_msg("data_pad %d", be32_to_cpu(r->data_pad));
dbg_msg("vol_type %d", (int)r->vol_type);
dbg_msg("upd_marker %d", (int)r->upd_marker);
dbg_msg("name_len %d", name_len);
if (r->name[0] == '\0') {
- dbg_msg("name NULL");
+ dbg_msg("name NULL");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- dbg_msg("name %s", &r->name[0]);
+ dbg_msg("name %s", &r->name[0]);
} else {
dbg_msg("1st 5 characters of the name: %c%c%c%c%c",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- dbg_msg("crc %#08x", ubi32_to_cpu(r->crc));
+ dbg_msg("crc %#08x", be32_to_cpu(r->crc));
}
/**
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index f816ad9a36c..ff8f39548cd 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -52,7 +52,6 @@ struct ubi_scan_volume;
struct ubi_scan_leb;
struct ubi_mkvol_req;
-void ubi_dbg_print(int type, const char *func, const char *fmt, ...);
void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
void ubi_dbg_dump_vol_info(const struct ubi_volume *vol);
@@ -66,7 +65,6 @@ void ubi_dbg_hexdump(const void *buf, int size);
#define dbg_msg(fmt, ...) ({})
#define ubi_dbg_dump_stack() ({})
-#define ubi_dbg_print(func, fmt, ...) ({})
#define ubi_dbg_dump_ec_hdr(ec_hdr) ({})
#define ubi_dbg_dump_vid_hdr(vid_hdr) ({})
#define ubi_dbg_dump_vol_info(vol) ({})
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 7c6b223b3f8..8aff9385613 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -425,10 +425,10 @@ retry:
} else if (err == UBI_IO_BITFLIPS)
scrub = 1;
- ubi_assert(lnum < ubi32_to_cpu(vid_hdr->used_ebs));
- ubi_assert(len == ubi32_to_cpu(vid_hdr->data_size));
+ ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+ ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
- crc = ubi32_to_cpu(vid_hdr->data_crc);
+ crc = be32_to_cpu(vid_hdr->data_crc);
ubi_free_vid_hdr(ubi, vid_hdr);
}
@@ -518,13 +518,13 @@ retry:
goto out_put;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
data_size = offset + len;
- new_buf = kmalloc(data_size, GFP_KERNEL);
+ new_buf = vmalloc(data_size);
if (!new_buf) {
err = -ENOMEM;
goto out_put;
@@ -535,7 +535,7 @@ retry:
if (offset > 0) {
err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS) {
- kfree(new_buf);
+ vfree(new_buf);
goto out_put;
}
}
@@ -544,11 +544,11 @@ retry:
err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size);
if (err) {
- kfree(new_buf);
+ vfree(new_buf);
goto write_error;
}
- kfree(new_buf);
+ vfree(new_buf);
ubi_free_vid_hdr(ubi, vid_hdr);
vol->eba_tbl[lnum] = new_pnum;
@@ -634,11 +634,11 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
- vid_hdr->vol_id = cpu_to_ubi32(vol_id);
- vid_hdr->lnum = cpu_to_ubi32(lnum);
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
- vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
@@ -692,7 +692,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -748,17 +748,17 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
- vid_hdr->vol_id = cpu_to_ubi32(vol_id);
- vid_hdr->lnum = cpu_to_ubi32(lnum);
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
- vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
crc = crc32(UBI_CRC32_INIT, buf, data_size);
vid_hdr->vol_type = UBI_VID_STATIC;
- vid_hdr->data_size = cpu_to_ubi32(data_size);
- vid_hdr->used_ebs = cpu_to_ubi32(used_ebs);
- vid_hdr->data_crc = cpu_to_ubi32(crc);
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+ vid_hdr->data_crc = cpu_to_be32(crc);
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
@@ -813,7 +813,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -854,17 +854,17 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
- vid_hdr->vol_id = cpu_to_ubi32(vol_id);
- vid_hdr->lnum = cpu_to_ubi32(lnum);
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
- vid_hdr->data_pad = cpu_to_ubi32(vol->data_pad);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
crc = crc32(UBI_CRC32_INIT, buf, len);
- vid_hdr->vol_type = UBI_VID_STATIC;
- vid_hdr->data_size = cpu_to_ubi32(len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
vid_hdr->copy_flag = 1;
- vid_hdr->data_crc = cpu_to_ubi32(crc);
+ vid_hdr->data_crc = cpu_to_be32(crc);
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
@@ -891,11 +891,13 @@ retry:
goto write_error;
}
- err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
- if (err) {
- ubi_free_vid_hdr(ubi, vid_hdr);
- leb_write_unlock(ubi, vol_id, lnum);
- return err;
+ if (vol->eba_tbl[lnum] >= 0) {
+ err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+ if (err) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
}
vol->eba_tbl[lnum] = pnum;
@@ -924,7 +926,7 @@ write_error:
return err;
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
@@ -965,19 +967,19 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
uint32_t crc;
void *buf, *buf1 = NULL;
- vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- lnum = ubi32_to_cpu(vid_hdr->lnum);
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
if (vid_hdr->vol_type == UBI_VID_STATIC) {
- data_size = ubi32_to_cpu(vid_hdr->data_size);
+ data_size = be32_to_cpu(vid_hdr->data_size);
aldata_size = ALIGN(data_size, ubi->min_io_size);
} else
data_size = aldata_size =
- ubi->leb_size - ubi32_to_cpu(vid_hdr->data_pad);
+ ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
- buf = kmalloc(aldata_size, GFP_KERNEL);
+ buf = vmalloc(aldata_size);
if (!buf)
return -ENOMEM;
@@ -987,7 +989,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
err = leb_write_lock(ubi, vol_id, lnum);
if (err) {
- kfree(buf);
+ vfree(buf);
return err;
}
@@ -1054,10 +1056,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
if (data_size > 0) {
vid_hdr->copy_flag = 1;
- vid_hdr->data_size = cpu_to_ubi32(data_size);
- vid_hdr->data_crc = cpu_to_ubi32(crc);
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
}
- vid_hdr->sqnum = cpu_to_ubi64(next_sqnum(ubi));
+ vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err)
@@ -1082,7 +1084,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* We've written the data and are going to read it back to make
* sure it was written correctly.
*/
- buf1 = kmalloc(aldata_size, GFP_KERNEL);
+ buf1 = vmalloc(aldata_size);
if (!buf1) {
err = -ENOMEM;
goto out_unlock;
@@ -1111,15 +1113,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vol->eba_tbl[lnum] = to;
leb_write_unlock(ubi, vol_id, lnum);
- kfree(buf);
- kfree(buf1);
+ vfree(buf);
+ vfree(buf1);
return 0;
out_unlock:
leb_write_unlock(ubi, vol_id, lnum);
- kfree(buf);
- kfree(buf1);
+ vfree(buf);
+ vfree(buf1);
return err;
}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index fc9478d605f..41ff74c60e1 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -282,7 +282,6 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
mtd->flags = MTD_WRITEABLE;
mtd->writesize = ubi->min_io_size;
mtd->owner = THIS_MODULE;
- mtd->size = vol->usable_leb_size * vol->reserved_pebs;
mtd->erasesize = vol->usable_leb_size;
mtd->read = gluebi_read;
mtd->write = gluebi_write;
@@ -290,6 +289,15 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
mtd->get_device = gluebi_get_device;
mtd->put_device = gluebi_put_device;
+ /*
+ * In case of a dynamic volume, the MTD device size is just the volume
+ * size. In case of a static volume the size is the amount of data bytes,
+ * which is zero at this moment and will be changed after a volume
+ * update.
+ */
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ mtd->size = vol->usable_leb_size * vol->reserved_pebs;
+
if (add_mtd_device(mtd)) {
ubi_err("cannot not add MTD device\n");
kfree(mtd->name);
@@ -321,3 +329,20 @@ int ubi_destroy_gluebi(struct ubi_volume *vol)
kfree(mtd->name);
return 0;
}
+
+/**
+ * ubi_gluebi_updated - UBI volume was updated notifier.
+ * @vol: volume description object
+ *
+ * This function is called every time an UBI volume is updated. It does
+ * nothing if volume @vol is dynamic, and changes the MTD device size if the
+ * volume is static. This is needed because static volumes cannot be read past
+ * the data they contain.
+ */
+void ubi_gluebi_updated(struct ubi_volume *vol)
+{
+ struct mtd_info *mtd = &vol->gluebi_mtd;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME)
+ mtd->size = vol->used_bytes;
+}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 438914d0515..b0d8f4cede9 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -125,9 +125,9 @@ static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
* o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
* correctable bit-flips were detected; this is harmless but may indicate
* that this eraseblock may become bad soon (but do not have to);
- * o %-EBADMSG if the MTD subsystem reported about data data integrity
- * problems, for example it can me an ECC error in case of NAND; this most
- * probably means that the data is corrupted;
+ * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
+ * example it can be an ECC error in case of NAND; this most probably means
+ * that the data is corrupted;
* o %-EIO if some I/O error occurred;
* o other negative error codes in case of other errors.
*/
@@ -298,7 +298,7 @@ retry:
memset(&ei, 0, sizeof(struct erase_info));
ei.mtd = ubi->mtd;
- ei.addr = pnum * ubi->peb_size;
+ ei.addr = (loff_t)pnum * ubi->peb_size;
ei.len = ubi->peb_size;
ei.callback = erase_callback;
ei.priv = (unsigned long)&wq;
@@ -382,7 +382,7 @@ static int torture_peb(const struct ubi_device *ubi, int pnum)
void *buf;
int err, i, patt_count;
- buf = kmalloc(ubi->peb_size, GFP_KERNEL);
+ buf = vmalloc(ubi->peb_size);
if (!buf)
return -ENOMEM;
@@ -437,7 +437,7 @@ out:
* physical eraseblock which means something is wrong with it.
*/
err = -EIO;
- kfree(buf);
+ vfree(buf);
return err;
}
@@ -557,9 +557,9 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
long long ec;
int vid_hdr_offset, leb_start;
- ec = ubi64_to_cpu(ec_hdr->ec);
- vid_hdr_offset = ubi32_to_cpu(ec_hdr->vid_hdr_offset);
- leb_start = ubi32_to_cpu(ec_hdr->data_offset);
+ ec = be64_to_cpu(ec_hdr->ec);
+ vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+ leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
ubi_err("node with incompatible UBI version found: "
@@ -640,7 +640,7 @@ int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
read_err = err;
}
- magic = ubi32_to_cpu(ec_hdr->magic);
+ magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
/*
* The magic field is wrong. Let's check if we have read all
@@ -684,7 +684,7 @@ int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
}
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
if (verbose) {
@@ -729,12 +729,12 @@ int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum,
dbg_io("write EC header to PEB %d", pnum);
ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
- ec_hdr->magic = cpu_to_ubi32(UBI_EC_HDR_MAGIC);
+ ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
ec_hdr->version = UBI_VERSION;
- ec_hdr->vid_hdr_offset = cpu_to_ubi32(ubi->vid_hdr_offset);
- ec_hdr->data_offset = cpu_to_ubi32(ubi->leb_start);
+ ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+ ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
- ec_hdr->hdr_crc = cpu_to_ubi32(crc);
+ ec_hdr->hdr_crc = cpu_to_be32(crc);
err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
@@ -757,13 +757,13 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
{
int vol_type = vid_hdr->vol_type;
int copy_flag = vid_hdr->copy_flag;
- int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- int lnum = ubi32_to_cpu(vid_hdr->lnum);
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int lnum = be32_to_cpu(vid_hdr->lnum);
int compat = vid_hdr->compat;
- int data_size = ubi32_to_cpu(vid_hdr->data_size);
- int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
- int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
- int data_crc = ubi32_to_cpu(vid_hdr->data_crc);
+ int data_size = be32_to_cpu(vid_hdr->data_size);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+ int data_crc = be32_to_cpu(vid_hdr->data_crc);
int usable_leb_size = ubi->leb_size - data_pad;
if (copy_flag != 0 && copy_flag != 1) {
@@ -914,7 +914,7 @@ int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
read_err = err;
}
- magic = ubi32_to_cpu(vid_hdr->magic);
+ magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
/*
* If we have read all 0xFF bytes, the VID header probably does
@@ -957,7 +957,7 @@ int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
}
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
if (verbose) {
@@ -1007,10 +1007,10 @@ int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
if (err)
return err > 0 ? -EINVAL: err;
- vid_hdr->magic = cpu_to_ubi32(UBI_VID_HDR_MAGIC);
+ vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
vid_hdr->version = UBI_VERSION;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
- vid_hdr->hdr_crc = cpu_to_ubi32(crc);
+ vid_hdr->hdr_crc = cpu_to_be32(crc);
err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
@@ -1060,7 +1060,7 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- magic = ubi32_to_cpu(ec_hdr->magic);
+ magic = be32_to_cpu(ec_hdr->magic);
if (magic != UBI_EC_HDR_MAGIC) {
ubi_err("bad magic %#08x, must be %#08x",
magic, UBI_EC_HDR_MAGIC);
@@ -1105,7 +1105,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
goto exit;
crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(ec_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
ubi_err("paranoid check failed for PEB %d", pnum);
@@ -1137,7 +1137,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- magic = ubi32_to_cpu(vid_hdr->magic);
+ magic = be32_to_cpu(vid_hdr->magic);
if (magic != UBI_VID_HDR_MAGIC) {
ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
magic, pnum, UBI_VID_HDR_MAGIC);
@@ -1187,7 +1187,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
goto exit;
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
- hdr_crc = ubi32_to_cpu(vid_hdr->hdr_crc);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
"read %#08x", pnum, crc, hdr_crc);
@@ -1224,9 +1224,10 @@ static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = vmalloc(len);
if (!buf)
return -ENOMEM;
+ memset(buf, 0, len);
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
@@ -1242,7 +1243,7 @@ static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
goto fail;
}
- kfree(buf);
+ vfree(buf);
return 0;
fail:
@@ -1252,7 +1253,7 @@ fail:
err = 1;
error:
ubi_dbg_dump_stack();
- kfree(buf);
+ vfree(buf);
return err;
}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d352c4575c3..4a458e83e4e 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -37,14 +37,9 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
{
const struct ubi_device *ubi;
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
- !ubi_devices[ubi_num]) {
- module_put(THIS_MODULE);
+ !ubi_devices[ubi_num])
return -ENODEV;
- }
ubi = ubi_devices[ubi_num];
di->ubi_num = ubi->ubi_num;
@@ -52,7 +47,6 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
di->min_io_size = ubi->min_io_size;
di->ro_mode = ubi->ro_mode;
di->cdev = MKDEV(ubi->major, 0);
- module_put(THIS_MODULE);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -319,9 +313,14 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
offset + len > vol->usable_leb_size)
return -EINVAL;
- if (vol->vol_type == UBI_STATIC_VOLUME && lnum == vol->used_ebs - 1 &&
- offset + len > vol->last_eb_bytes)
- return -EINVAL;
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
if (vol->upd_marker)
return -EBADF;
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 38d4e6757dc..9e2338c8e2c 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -67,7 +67,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
if (vol->vol_type != UBI_STATIC_VOLUME)
return 0;
- buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
+ buf = vmalloc(vol->usable_leb_size);
if (!buf)
return -ENOMEM;
@@ -87,7 +87,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
}
- kfree(buf);
+ vfree(buf);
return err;
}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 473f3200b86..94ee5493441 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -24,7 +24,7 @@
* This unit is responsible for scanning the flash media, checking UBI
* headers and providing complete information about the UBI flash image.
*
- * The scanning information is reoresented by a &struct ubi_scan_info' object.
+ * The scanning information is represented by a &struct ubi_scan_info' object.
* Information about found volumes is represented by &struct ubi_scan_volume
* objects which are kept in volume RB-tree with root at the @volumes field.
* The RB-tree is indexed by the volume ID.
@@ -55,8 +55,19 @@ static int paranoid_check_si(const struct ubi_device *ubi,
static struct ubi_ec_hdr *ech;
static struct ubi_vid_hdr *vidh;
-int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
- struct list_head *list)
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @si: scanning information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ * @list: the list to add to
+ *
+ * This function adds physical eraseblock @pnum to free, erase, corrupted or
+ * alien lists. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int add_to_list(struct ubi_scan_info *si, int pnum, int ec,
+ struct list_head *list)
{
struct ubi_scan_leb *seb;
@@ -121,9 +132,9 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
const struct ubi_scan_volume *sv, int pnum)
{
int vol_type = vid_hdr->vol_type;
- int vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- int used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
- int data_pad = ubi32_to_cpu(vid_hdr->data_pad);
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
if (sv->leb_count != 0) {
int sv_vol_type;
@@ -189,7 +200,7 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
struct ubi_scan_volume *sv;
struct rb_node **p = &si->volumes.rb_node, *parent = NULL;
- ubi_assert(vol_id == ubi32_to_cpu(vid_hdr->vol_id));
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
/* Walk the volume RB-tree to look if this volume is already present */
while (*p) {
@@ -211,11 +222,10 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
return ERR_PTR(-ENOMEM);
sv->highest_lnum = sv->leb_count = 0;
- si->max_sqnum = 0;
sv->vol_id = vol_id;
sv->root = RB_ROOT;
- sv->used_ebs = ubi32_to_cpu(vid_hdr->used_ebs);
- sv->data_pad = ubi32_to_cpu(vid_hdr->data_pad);
+ sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ sv->data_pad = be32_to_cpu(vid_hdr->data_pad);
sv->compat = vid_hdr->compat;
sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
: UBI_STATIC_VOLUME;
@@ -257,10 +267,10 @@ static int compare_lebs(const struct ubi_device *ubi,
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vidh = NULL;
- unsigned long long sqnum2 = ubi64_to_cpu(vid_hdr->sqnum);
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (seb->sqnum == 0 && sqnum2 == 0) {
- long long abs, v1 = seb->leb_ver, v2 = ubi32_to_cpu(vid_hdr->leb_ver);
+ long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver);
/*
* UBI constantly increases the logical eraseblock version
@@ -344,8 +354,8 @@ static int compare_lebs(const struct ubi_device *ubi,
/* Read the data of the copy and check the CRC */
- len = ubi32_to_cpu(vid_hdr->data_size);
- buf = kmalloc(len, GFP_KERNEL);
+ len = be32_to_cpu(vid_hdr->data_size);
+ buf = vmalloc(len);
if (!buf) {
err = -ENOMEM;
goto out_free_vidh;
@@ -355,7 +365,7 @@ static int compare_lebs(const struct ubi_device *ubi,
if (err && err != UBI_IO_BITFLIPS)
goto out_free_buf;
- data_crc = ubi32_to_cpu(vid_hdr->data_crc);
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
crc = crc32(UBI_CRC32_INIT, buf, len);
if (crc != data_crc) {
dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
@@ -368,7 +378,7 @@ static int compare_lebs(const struct ubi_device *ubi,
bitflips = !!err;
}
- kfree(buf);
+ vfree(buf);
ubi_free_vid_hdr(ubi, vidh);
if (second_is_newer)
@@ -379,7 +389,7 @@ static int compare_lebs(const struct ubi_device *ubi,
return second_is_newer | (bitflips << 1) | (corrupted << 2);
out_free_buf:
- kfree(buf);
+ vfree(buf);
out_free_vidh:
ubi_free_vid_hdr(ubi, vidh);
ubi_assert(err < 0);
@@ -396,8 +406,12 @@ out_free_vidh:
* @vid_hdr: the volume identifier header
* @bitflips: if bit-flips were detected when this physical eraseblock was read
*
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
*/
int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
@@ -410,10 +424,10 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
struct ubi_scan_leb *seb;
struct rb_node **p, *parent = NULL;
- vol_id = ubi32_to_cpu(vid_hdr->vol_id);
- lnum = ubi32_to_cpu(vid_hdr->lnum);
- sqnum = ubi64_to_cpu(vid_hdr->sqnum);
- leb_ver = ubi32_to_cpu(vid_hdr->leb_ver);
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+ leb_ver = be32_to_cpu(vid_hdr->leb_ver);
dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d",
pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips);
@@ -422,6 +436,9 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
if (IS_ERR(sv) < 0)
return PTR_ERR(sv);
+ if (si->max_sqnum < sqnum)
+ si->max_sqnum = sqnum;
+
/*
* Walk the RB-tree of logical eraseblocks of volume @vol_id to look
* if this is the first instance of this logical eraseblock or not.
@@ -492,11 +509,11 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
return err;
if (cmp_res & 4)
- err = ubi_scan_add_to_list(si, seb->pnum,
- seb->ec, &si->corr);
+ err = add_to_list(si, seb->pnum, seb->ec,
+ &si->corr);
else
- err = ubi_scan_add_to_list(si, seb->pnum,
- seb->ec, &si->erase);
+ err = add_to_list(si, seb->pnum, seb->ec,
+ &si->erase);
if (err)
return err;
@@ -508,7 +525,7 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
if (sv->highest_lnum == lnum)
sv->last_data_size =
- ubi32_to_cpu(vid_hdr->data_size);
+ be32_to_cpu(vid_hdr->data_size);
return 0;
} else {
@@ -517,11 +534,9 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
* previously.
*/
if (cmp_res & 4)
- return ubi_scan_add_to_list(si, pnum, ec,
- &si->corr);
+ return add_to_list(si, pnum, ec, &si->corr);
else
- return ubi_scan_add_to_list(si, pnum, ec,
- &si->erase);
+ return add_to_list(si, pnum, ec, &si->erase);
}
}
@@ -547,12 +562,9 @@ int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
if (sv->highest_lnum <= lnum) {
sv->highest_lnum = lnum;
- sv->last_data_size = ubi32_to_cpu(vid_hdr->data_size);
+ sv->last_data_size = be32_to_cpu(vid_hdr->data_size);
}
- if (si->max_sqnum < sqnum)
- si->max_sqnum = sqnum;
-
sv->leb_count += 1;
rb_link_node(&seb->u.rb, parent, p);
rb_insert_color(&seb->u.rb, &sv->root);
@@ -674,7 +686,7 @@ int ubi_scan_erase_peb(const struct ubi_device *ubi,
return -EINVAL;
}
- ec_hdr->ec = cpu_to_ubi64(ec);
+ ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_sync_erase(ubi, pnum, 0);
if (err < 0)
@@ -754,7 +766,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi,
* @si: scanning information
* @pnum: the physical eraseblock number
*
- * This function returns a zero if the physical eraseblock was succesfully
+ * This function returns a zero if the physical eraseblock was successfully
* handled and a negative error code in case of failure.
*/
static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
@@ -783,8 +795,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
else if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else if (err == UBI_IO_PEB_EMPTY)
- return ubi_scan_add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC,
- &si->erase);
+ return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase);
else if (err == UBI_IO_BAD_EC_HDR) {
/*
* We have to also look at the VID header, possibly it is not
@@ -806,7 +817,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
return -EINVAL;
}
- ec = ubi64_to_cpu(ech->ec);
+ ec = be64_to_cpu(ech->ec);
if (ec > UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. The EC headers have 64 bits
@@ -832,28 +843,28 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
else if (err == UBI_IO_BAD_VID_HDR ||
(err == UBI_IO_PEB_FREE && ec_corr)) {
/* VID header is corrupted */
- err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->corr);
if (err)
return err;
goto adjust_mean_ec;
} else if (err == UBI_IO_PEB_FREE) {
/* No VID header - the physical eraseblock is free */
- err = ubi_scan_add_to_list(si, pnum, ec, &si->free);
+ err = add_to_list(si, pnum, ec, &si->free);
if (err)
return err;
goto adjust_mean_ec;
}
- vol_id = ubi32_to_cpu(vidh->vol_id);
+ vol_id = be32_to_cpu(vidh->vol_id);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
- int lnum = ubi32_to_cpu(vidh->lnum);
+ int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d"
" found, remove it", vol_id, lnum);
- err = ubi_scan_add_to_list(si, pnum, ec, &si->corr);
+ err = add_to_list(si, pnum, ec, &si->corr);
if (err)
return err;
break;
@@ -868,7 +879,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
case UBI_COMPAT_PRESERVE:
ubi_msg("\"preserve\" compatible internal volume %d:%d"
" found", vol_id, lnum);
- err = ubi_scan_add_to_list(si, pnum, ec, &si->alien);
+ err = add_to_list(si, pnum, ec, &si->alien);
if (err)
return err;
si->alien_peb_count += 1;
@@ -1109,7 +1120,7 @@ static int paranoid_check_si(const struct ubi_device *ubi,
uint8_t *buf;
/*
- * At first, check that scanning information is ok.
+ * At first, check that scanning information is OK.
*/
ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
int leb_count = 0;
@@ -1249,12 +1260,12 @@ static int paranoid_check_si(const struct ubi_device *ubi,
goto bad_vid_hdr;
}
- if (seb->sqnum != ubi64_to_cpu(vidh->sqnum)) {
+ if (seb->sqnum != be64_to_cpu(vidh->sqnum)) {
ubi_err("bad sqnum %llu", seb->sqnum);
goto bad_vid_hdr;
}
- if (sv->vol_id != ubi32_to_cpu(vidh->vol_id)) {
+ if (sv->vol_id != be32_to_cpu(vidh->vol_id)) {
ubi_err("bad vol_id %d", sv->vol_id);
goto bad_vid_hdr;
}
@@ -1264,22 +1275,22 @@ static int paranoid_check_si(const struct ubi_device *ubi,
goto bad_vid_hdr;
}
- if (seb->lnum != ubi32_to_cpu(vidh->lnum)) {
+ if (seb->lnum != be32_to_cpu(vidh->lnum)) {
ubi_err("bad lnum %d", seb->lnum);
goto bad_vid_hdr;
}
- if (sv->used_ebs != ubi32_to_cpu(vidh->used_ebs)) {
+ if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) {
ubi_err("bad used_ebs %d", sv->used_ebs);
goto bad_vid_hdr;
}
- if (sv->data_pad != ubi32_to_cpu(vidh->data_pad)) {
+ if (sv->data_pad != be32_to_cpu(vidh->data_pad)) {
ubi_err("bad data_pad %d", sv->data_pad);
goto bad_vid_hdr;
}
- if (seb->leb_ver != ubi32_to_cpu(vidh->leb_ver)) {
+ if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) {
ubi_err("bad leb_ver %u", seb->leb_ver);
goto bad_vid_hdr;
}
@@ -1288,12 +1299,12 @@ static int paranoid_check_si(const struct ubi_device *ubi,
if (!last_seb)
continue;
- if (sv->highest_lnum != ubi32_to_cpu(vidh->lnum)) {
+ if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) {
ubi_err("bad highest_lnum %d", sv->highest_lnum);
goto bad_vid_hdr;
}
- if (sv->last_data_size != ubi32_to_cpu(vidh->data_size)) {
+ if (sv->last_data_size != be32_to_cpu(vidh->data_size)) {
ubi_err("bad last_data_size %d", sv->last_data_size);
goto bad_vid_hdr;
}
@@ -1310,8 +1321,10 @@ static int paranoid_check_si(const struct ubi_device *ubi,
memset(buf, 1, ubi->peb_count);
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
err = ubi_io_is_bad(ubi, pnum);
- if (err < 0)
+ if (err < 0) {
+ kfree(buf);
return err;
+ }
else if (err)
buf[pnum] = 0;
}
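
The expanded comment on ubi_scan_add_used() above explains why the function is involved: two physical eraseblocks may carry copies of the same logical eraseblock, and only the newer copy may stay in the volume's 'used' tree while the other is queued for erasure or marked corrupted. A stand-alone sketch of that decision, using illustrative names (peb_copy, pick_newer are not UBI symbols), might look like this:

/*
 * Illustrative only -- not UBI code.  Two PEBs claim the same LEB; the
 * one with the larger sequence number (written later) wins.  The real
 * compare_lebs() additionally reads the data to break ties for images
 * written by older UBI versions.
 */
#include <stdint.h>
#include <stdio.h>

struct peb_copy {
	int pnum;          /* physical eraseblock number */
	uint64_t sqnum;    /* sequence number from the VID header */
};

static const struct peb_copy *pick_newer(const struct peb_copy *a,
					 const struct peb_copy *b)
{
	return (b->sqnum > a->sqnum) ? b : a;
}

int main(void)
{
	struct peb_copy older = { .pnum = 17, .sqnum = 100 };
	struct peb_copy newer = { .pnum = 42, .sqnum = 161 };

	printf("keep PEB %d, drop PEB %d\n",
	       pick_newer(&older, &newer)->pnum, older.pnum);
	return 0;
}

The losing copy corresponds to the PEB that ubi_scan_add_used() moves to si->erase or si->corr in the hunks above.
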
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h
index 3949f6192c7..140e82e2653 100644
--- a/drivers/mtd/ubi/scan.h
+++ b/drivers/mtd/ubi/scan.h
@@ -147,8 +147,6 @@ static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
list_add_tail(&seb->u.list, list);
}
-int ubi_scan_add_to_list(struct ubi_scan_info *si, int pnum, int ec,
- struct list_head *list);
int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
int bitflips);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index feb647f108f..5959f91be24 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -35,6 +35,7 @@
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <mtd/ubi-header.h>
@@ -374,9 +375,11 @@ void ubi_calculate_reserved(struct ubi_device *ubi);
#ifdef CONFIG_MTD_UBI_GLUEBI
int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
int ubi_destroy_gluebi(struct ubi_volume *vol);
+void ubi_gluebi_updated(struct ubi_volume *vol);
#else
#define ubi_create_gluebi(ubi, vol) 0
#define ubi_destroy_gluebi(vol) 0
+#define ubi_gluebi_updated(vol)
#endif
/* eba.c */
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 8925b977e3d..0efc586a832 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -150,7 +150,7 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
vol->updating = 0;
}
- vol->upd_buf = kmalloc(ubi->leb_size, GFP_KERNEL);
+ vol->upd_buf = vmalloc(ubi->leb_size);
if (!vol->upd_buf)
return -ENOMEM;
@@ -339,7 +339,7 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
err = ubi_wl_flush(ubi);
if (err == 0) {
err = to_write;
- kfree(vol->upd_buf);
+ vfree(vol->upd_buf);
vol->updating = 0;
}
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 622d0d18952..ea0d5c825ab 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -228,7 +228,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i] &&
ubi->volumes[i]->name_len == req->name_len &&
- strcmp(ubi->volumes[i]->name, req->name) == 0) {
+ !strcmp(ubi->volumes[i]->name, req->name)) {
dbg_err("volume \"%s\" exists (ID %d)", req->name, i);
goto out_unlock;
}
@@ -243,7 +243,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Reserve physical eraseblocks */
if (vol->reserved_pebs > ubi->avail_pebs) {
dbg_err("not enough PEBs, only %d available", ubi->avail_pebs);
- spin_unlock(&ubi->volumes_lock);
err = -ENOSPC;
goto out_unlock;
}
@@ -281,7 +280,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
- vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
} else {
bytes = vol->used_bytes;
vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size);
@@ -320,10 +320,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Fill volume table record */
memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
- vtbl_rec.reserved_pebs = cpu_to_ubi32(vol->reserved_pebs);
- vtbl_rec.alignment = cpu_to_ubi32(vol->alignment);
- vtbl_rec.data_pad = cpu_to_ubi32(vol->data_pad);
- vtbl_rec.name_len = cpu_to_ubi16(vol->name_len);
+ vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+ vtbl_rec.alignment = cpu_to_be32(vol->alignment);
+ vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
+ vtbl_rec.name_len = cpu_to_be16(vol->name_len);
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
vtbl_rec.vol_type = UBI_VID_DYNAMIC;
else
@@ -352,6 +352,7 @@ out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
+ ubi->volumes[vol_id] = NULL;
out_unlock:
spin_unlock(&ubi->volumes_lock);
kfree(vol);
@@ -368,6 +369,7 @@ out_sysfs:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
+ ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
volume_sysfs_close(vol);
return err;
@@ -503,7 +505,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
/* Change volume table record */
memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
- vtbl_rec.reserved_pebs = cpu_to_ubi32(reserved_pebs);
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
if (err)
goto out_acc;
@@ -537,7 +539,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
- vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
}
paranoid_check_volumes(ubi);
@@ -643,21 +646,33 @@ void ubi_free_volume(struct ubi_device *ubi, int vol_id)
* @ubi: UBI device description object
* @vol_id: volume ID
*/
-static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
+static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
{
int idx = vol_id2idx(ubi, vol_id);
int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
- const struct ubi_volume *vol = ubi->volumes[idx];
+ const struct ubi_volume *vol;
long long n;
const char *name;
- reserved_pebs = ubi32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ spin_lock(&ubi->volumes_lock);
+ reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ vol = ubi->volumes[idx];
if (!vol) {
if (reserved_pebs) {
ubi_err("no volume info, but volume exists");
goto fail;
}
+ spin_unlock(&ubi->volumes_lock);
+ return;
+ }
+
+ if (vol->exclusive) {
+ /*
+ * The volume may be being created at the moment, do not check
+ * it (e.g., it may be in the middle of ubi_create_volume()).
+ */
+ spin_unlock(&ubi->volumes_lock);
return;
}
@@ -726,7 +741,7 @@ static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
goto fail;
}
- n = vol->used_ebs * vol->usable_leb_size;
+ n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
if (vol->corrupted != 0) {
ubi_err("corrupted dynamic volume");
@@ -765,9 +780,9 @@ static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
}
}
- alignment = ubi32_to_cpu(ubi->vtbl[vol_id].alignment);
- data_pad = ubi32_to_cpu(ubi->vtbl[vol_id].data_pad);
- name_len = ubi16_to_cpu(ubi->vtbl[vol_id].name_len);
+ alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+ data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+ name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
upd_marker = ubi->vtbl[vol_id].upd_marker;
name = &ubi->vtbl[vol_id].name[0];
if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
@@ -782,12 +797,14 @@ static void paranoid_check_volume(const struct ubi_device *ubi, int vol_id)
goto fail;
}
+ spin_unlock(&ubi->volumes_lock);
return;
fail:
- ubi_err("paranoid check failed");
+ ubi_err("paranoid check failed for volume %d", vol_id);
ubi_dbg_dump_vol_info(vol);
ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ spin_unlock(&ubi->volumes_lock);
BUG();
}
@@ -800,10 +817,8 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
int i;
mutex_lock(&ubi->vtbl_mutex);
- spin_lock(&ubi->volumes_lock);
for (i = 0; i < ubi->vtbl_slots; i++)
paranoid_check_volume(ubi, i);
- spin_unlock(&ubi->volumes_lock);
mutex_unlock(&ubi->vtbl_mutex);
}
#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index b6fd6bbd941..bc5df50813d 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -93,12 +93,9 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
vtbl_rec = &empty_vtbl_record;
else {
crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
- vtbl_rec->crc = cpu_to_ubi32(crc);
+ vtbl_rec->crc = cpu_to_be32(crc);
}
- dbg_msg("change record %d", idx);
- ubi_dbg_dump_vtbl_record(vtbl_rec, idx);
-
mutex_lock(&ubi->vtbl_mutex);
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
@@ -141,18 +138,18 @@ static int vtbl_check(const struct ubi_device *ubi,
for (i = 0; i < ubi->vtbl_slots; i++) {
cond_resched();
- reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
- alignment = ubi32_to_cpu(vtbl[i].alignment);
- data_pad = ubi32_to_cpu(vtbl[i].data_pad);
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
upd_marker = vtbl[i].upd_marker;
vol_type = vtbl[i].vol_type;
- name_len = ubi16_to_cpu(vtbl[i].name_len);
+ name_len = be16_to_cpu(vtbl[i].name_len);
name = &vtbl[i].name[0];
crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
- if (ubi32_to_cpu(vtbl[i].crc) != crc) {
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
ubi_err("bad CRC at record %u: %#08x, not %#08x",
- i, crc, ubi32_to_cpu(vtbl[i].crc));
+ i, crc, be32_to_cpu(vtbl[i].crc));
ubi_dbg_dump_vtbl_record(&vtbl[i], i);
return 1;
}
@@ -225,8 +222,8 @@ static int vtbl_check(const struct ubi_device *ubi,
/* Checks that all names are unique */
for (i = 0; i < ubi->vtbl_slots - 1; i++) {
for (n = i + 1; n < ubi->vtbl_slots; n++) {
- int len1 = ubi16_to_cpu(vtbl[i].name_len);
- int len2 = ubi16_to_cpu(vtbl[n].name_len);
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
@@ -288,13 +285,13 @@ retry:
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->vol_id = cpu_to_ubi32(UBI_LAYOUT_VOL_ID);
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
- vid_hdr->data_pad = cpu_to_ubi32(0);
- vid_hdr->lnum = cpu_to_ubi32(copy);
- vid_hdr->sqnum = cpu_to_ubi64(++si->max_sqnum);
- vid_hdr->leb_ver = cpu_to_ubi32(old_seb ? old_seb->leb_ver + 1: 0);
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->lnum = cpu_to_be32(copy);
+ vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum);
+ vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0);
/* The EC header is already there, write the VID header */
err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
@@ -317,14 +314,15 @@ retry:
return err;
write_error:
- kfree(new_seb);
- /* May be this physical eraseblock went bad, try to pick another one */
- if (++tries <= 5) {
- err = ubi_scan_add_to_list(si, new_seb->pnum, new_seb->ec,
- &si->corr);
- if (!err)
- goto retry;
+ if (err == -EIO && ++tries <= 5) {
+ /*
+ * Probably this physical eraseblock went bad, try to pick
+ * another one.
+ */
+ list_add_tail(&new_seb->u.list, &si->corr);
+ goto retry;
}
+ kfree(new_seb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -380,11 +378,12 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
/* Read both LEB 0 and LEB 1 into memory */
ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
- leb[seb->lnum] = kzalloc(ubi->vtbl_size, GFP_KERNEL);
+ leb[seb->lnum] = vmalloc(ubi->vtbl_size);
if (!leb[seb->lnum]) {
err = -ENOMEM;
goto out_free;
}
+ memset(leb[seb->lnum], 0, ubi->vtbl_size);
err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
ubi->vtbl_size);
@@ -415,7 +414,7 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
}
/* Both LEB 0 and LEB 1 are OK and consistent */
- kfree(leb[1]);
+ vfree(leb[1]);
return leb[0];
} else {
/* LEB 0 is corrupted or does not exist */
@@ -436,13 +435,13 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
goto out_free;
ubi_msg("volume table was restored");
- kfree(leb[0]);
+ vfree(leb[0]);
return leb[1];
}
out_free:
- kfree(leb[0]);
- kfree(leb[1]);
+ vfree(leb[0]);
+ vfree(leb[1]);
return ERR_PTR(err);
}
@@ -460,9 +459,10 @@ static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi,
int i;
struct ubi_vtbl_record *vtbl;
- vtbl = kzalloc(ubi->vtbl_size, GFP_KERNEL);
+ vtbl = vmalloc(ubi->vtbl_size);
if (!vtbl)
return ERR_PTR(-ENOMEM);
+ memset(vtbl, 0, ubi->vtbl_size);
for (i = 0; i < ubi->vtbl_slots; i++)
memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
@@ -472,7 +472,7 @@ static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi,
err = create_vtbl(ubi, si, i, vtbl);
if (err) {
- kfree(vtbl);
+ vfree(vtbl);
return ERR_PTR(err);
}
}
@@ -500,19 +500,19 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
for (i = 0; i < ubi->vtbl_slots; i++) {
cond_resched();
- if (ubi32_to_cpu(vtbl[i].reserved_pebs) == 0)
+ if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
continue; /* Empty record */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
- vol->reserved_pebs = ubi32_to_cpu(vtbl[i].reserved_pebs);
- vol->alignment = ubi32_to_cpu(vtbl[i].alignment);
- vol->data_pad = ubi32_to_cpu(vtbl[i].data_pad);
+ vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ vol->alignment = be32_to_cpu(vtbl[i].alignment);
+ vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
- vol->name_len = ubi16_to_cpu(vtbl[i].name_len);
+ vol->name_len = be16_to_cpu(vtbl[i].name_len);
vol->usable_leb_size = ubi->leb_size - vol->data_pad;
memcpy(vol->name, vtbl[i].name, vol->name_len);
vol->name[vol->name_len] = '\0';
@@ -531,7 +531,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
- vol->used_bytes = vol->used_ebs * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
continue;
}
@@ -561,7 +562,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
}
vol->used_ebs = sv->used_ebs;
- vol->used_bytes = (vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
vol->used_bytes += sv->last_data_size;
vol->last_eb_bytes = sv->last_data_size;
}
@@ -578,7 +580,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->usable_leb_size = ubi->leb_size;
vol->used_ebs = vol->reserved_pebs;
vol->last_eb_bytes = vol->reserved_pebs;
- vol->used_bytes = vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
vol->vol_id = UBI_LAYOUT_VOL_ID;
ubi_assert(!ubi->volumes[i]);
@@ -718,7 +721,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
int i, err;
struct ubi_scan_volume *sv;
- empty_vtbl_record.crc = cpu_to_ubi32(0xf116c36b);
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
/*
* The number of supported volumes is limited by the eraseblock size
@@ -783,7 +786,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
return 0;
out_free:
- kfree(ubi->vtbl);
+ vfree(ubi->vtbl);
for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
if (ubi->volumes[i]) {
kfree(ubi->volumes[i]);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ab2174a56bc..9de95376209 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -667,7 +667,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
- ec_hdr->ec = cpu_to_ubi64(ec);
+ ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
if (err)
@@ -1060,9 +1060,8 @@ out_unlock:
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
- int err;
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum;
+ int pnum = e->pnum, err, need;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1097,62 +1096,70 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
kfree(wl_wrk);
kmem_cache_free(wl_entries_slab, e);
- if (err != -EIO) {
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+ err1 = schedule_erase(ubi, e, 0);
+ if (err1) {
+ err = err1;
+ goto out_ro;
+ }
+ return err;
+ } else if (err != -EIO) {
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, let's switch to RO mode.
*/
- ubi_ro_mode(ubi);
- return err;
+ goto out_ro;
}
/* It is %-EIO, the PEB went bad */
if (!ubi->bad_allowed) {
ubi_err("bad physical eraseblock %d detected", pnum);
- ubi_ro_mode(ubi);
- err = -EIO;
- } else {
- int need;
-
- spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
+ goto out_ro;
+ }
- if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- ubi_ro_mode(ubi);
- return -EIO;
- }
+ spin_lock(&ubi->volumes_lock);
+ need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
+ if (need > 0) {
+ need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ if (need > 0)
+ ubi_msg("reserve more %d PEBs", need);
+ }
+ if (ubi->beb_rsvd_pebs == 0) {
spin_unlock(&ubi->volumes_lock);
- ubi_msg("mark PEB %d as bad", pnum);
+ ubi_err("no reserved physical eraseblocks");
+ goto out_ro;
+ }
- err = ubi_io_mark_bad(ubi, pnum);
- if (err) {
- ubi_ro_mode(ubi);
- return err;
- }
+ spin_unlock(&ubi->volumes_lock);
+ ubi_msg("mark PEB %d as bad", pnum);
- spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
- ubi->bad_peb_count += 1;
- ubi->good_peb_count -= 1;
- ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs == 0)
- ubi_warn("last PEB from the reserved pool was used");
- spin_unlock(&ubi->volumes_lock);
- }
+ err = ubi_io_mark_bad(ubi, pnum);
+ if (err)
+ goto out_ro;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->beb_rsvd_pebs -= 1;
+ ubi->bad_peb_count += 1;
+ ubi->good_peb_count -= 1;
+ ubi_calculate_reserved(ubi);
+ if (ubi->beb_rsvd_pebs == 0)
+ ubi_warn("last PEB from the reserved pool was used");
+ spin_unlock(&ubi->volumes_lock);
+
+ return err;
+out_ro:
+ ubi_ro_mode(ubi);
return err;
}
@@ -1634,7 +1641,7 @@ static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
goto out_free;
}
- read_ec = ubi64_to_cpu(ec_hdr->ec);
+ read_ec = be64_to_cpu(ec_hdr->ec);
if (ec != read_ec) {
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 43d03178064..5fb659f8b20 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2486,6 +2486,18 @@ source "drivers/atm/Kconfig"
source "drivers/s390/net/Kconfig"
+config XEN_NETDEV_FRONTEND
+ tristate "Xen network device frontend driver"
+ depends on XEN
+ default y
+ help
+ The network device frontend driver allows the kernel to
+ access network devices exported by a virtual
+ machine containing a physical network device driver. The
+ frontend driver is intended for unprivileged guest domains;
+ if you are compiling a kernel for a Xen guest, you almost
+ certainly want to enable this.
+
config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support"
depends on PPC_ISERIES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index eb4167622a6..0e286ab8855 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -127,6 +127,8 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
+obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
+
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d23861c8658..a729da061bb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -54,8 +54,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.6.2"
-#define DRV_MODULE_RELDATE "July 6, 2007"
+#define DRV_MODULE_VERSION "1.6.3"
+#define DRV_MODULE_RELDATE "July 16, 2007"
#define RUN_AT(x) (jiffies + (x))
@@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = {
static struct flash_spec flash_table[] =
{
+#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
/* Slow EEPROM */
{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
- 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - slow"},
/* Expansion entry 0001 */
{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0001"},
/* Saifun SA25F010 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
"Non-buffered flash (128kB)"},
/* Saifun SA25F020 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
"Non-buffered flash (256kB)"},
/* Expansion entry 0100 */
{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 0100"},
/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
- 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
"Entry 0101: ST M45PE10 (128kB non-bufferred)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
- 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
"Entry 0110: ST M45PE20 (256kB non-bufferred)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
"Non-buffered flash (64kB)"},
/* Fast EEPROM */
{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
- 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+ BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
"EEPROM - fast"},
/* Expansion entry 1001 */
{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1001"},
/* Expansion entry 1010 */
{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1010"},
/* ATMEL AT45DB011B (buffered flash) */
{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
"Buffered flash (128kB)"},
/* Expansion entry 1100 */
{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1100"},
/* Expansion entry 1101 */
{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
- 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+ NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1101"},
/* Atmel Expansion entry 1110 */
{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
"Entry 1110 (Atmel)"},
/* ATMEL AT45DB021B (buffered flash) */
{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
- 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+ BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
"Buffered flash (256kB)"},
};
+static struct flash_spec flash_5709 = {
+ .flags = BNX2_NV_BUFFERED,
+ .page_bits = BCM5709_FLASH_PAGE_BITS,
+ .page_size = BCM5709_FLASH_PAGE_SIZE,
+ .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
+ .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
+ .name = "5709 Buffered flash (256kB)",
+};
+
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
@@ -3289,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
val = REG_RD(bp, BNX2_MISC_CFG);
REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
- if (!bp->flash_info->buffered) {
+ if (bp->flash_info->flags & BNX2_NV_WREN) {
int j;
REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
@@ -3349,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
u32 cmd;
int j;
- if (bp->flash_info->buffered)
+ if (bp->flash_info->flags & BNX2_NV_BUFFERED)
/* Buffered flash, no erase needed */
return 0;
@@ -3392,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
/* Build the command word. */
cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
- /* Calculate an offset of a buffered flash. */
- if (bp->flash_info->buffered) {
+ /* Calculate an offset of a buffered flash, not needed for 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
offset = ((offset / bp->flash_info->page_size) <<
bp->flash_info->page_bits) +
(offset % bp->flash_info->page_size);
@@ -3439,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
/* Build the command word. */
cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
- /* Calculate an offset of a buffered flash. */
- if (bp->flash_info->buffered) {
+ /* Calculate an offset of a buffered flash, not needed for 5709. */
+ if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
offset = ((offset / bp->flash_info->page_size) <<
bp->flash_info->page_bits) +
(offset % bp->flash_info->page_size);
@@ -3478,15 +3489,19 @@ static int
bnx2_init_nvram(struct bnx2 *bp)
{
u32 val;
- int j, entry_count, rc;
+ int j, entry_count, rc = 0;
struct flash_spec *flash;
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ bp->flash_info = &flash_5709;
+ goto get_flash_size;
+ }
+
/* Determine the selected interface. */
val = REG_RD(bp, BNX2_NVM_CFG1);
entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
- rc = 0;
if (val & 0x40000000) {
/* Flash interface has been reconfigured */
@@ -3542,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp)
return -ENODEV;
}
+get_flash_size:
val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
if (val)
@@ -3706,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
buf = align_buf;
}
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
flash_buffer = kmalloc(264, GFP_KERNEL);
if (flash_buffer == NULL) {
rc = -ENOMEM;
@@ -3739,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
bnx2_enable_nvram_access(bp);
cmd_flags = BNX2_NVM_COMMAND_FIRST;
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
int j;
/* Read the whole page into the buffer
@@ -3767,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write back the buffer data from page_start to
* data_start */
i = 0;
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
/* Erase the page */
if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
goto nvram_write_end;
@@ -3791,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write the new data from data_start to data_end */
for (addr = data_start; addr < data_end; addr += 4, i += 4) {
if ((addr == page_end - 4) ||
- ((bp->flash_info->buffered) &&
+ ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
(addr == data_end - 4))) {
cmd_flags |= BNX2_NVM_COMMAND_LAST;
@@ -3808,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
/* Loop to write back the buffer data from data_end
* to page_end */
- if (bp->flash_info->buffered == 0) {
+ if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
for (addr = data_end; addr < page_end;
addr += 4, i += 4) {
@@ -4107,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp)
if (CHIP_NUM(bp) == CHIP_NUM_5708)
REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
else
- REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
+ REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
if (CHIP_ID(bp) == CHIP_ID_5706_A1)
@@ -4127,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp)
REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
- if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
- BNX2_PORT_FEATURE_ASF_ENABLED)
- bp->flags |= ASF_ENABLE_FLAG;
-
/* Initialize the receive filter. */
bnx2_set_rx_mode(bp->dev);
@@ -5786,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
bp->stats_ticks = USEC_PER_SEC;
}
- if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
- bp->stats_ticks &= 0xffff00;
+ if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+ bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+ bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
if (netif_running(bp->dev)) {
bnx2_netif_stop(bp);
@@ -6629,6 +6642,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (i != 2)
bp->fw_version[j++] = '.';
}
+ if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
+ BNX2_PORT_FEATURE_ASF_ENABLED) {
+ bp->flags |= ASF_ENABLE_FLAG;
+
+ for (i = 0; i < 30; i++) {
+ reg = REG_RD_IND(bp, bp->shmem_base +
+ BNX2_BC_STATE_CONDITION);
+ if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+ break;
+ msleep(10);
+ }
+ }
reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
reg &= BNX2_CONDITION_MFW_RUN_MASK;
if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
@@ -6672,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ticks_int = 18;
bp->rx_ticks = 18;
- bp->stats_ticks = 1000000 & 0xffff00;
+ bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
bp->timer_interval = HZ;
bp->current_interval = HZ;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index d8cd1afeb23..102adfe1e92 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6433,6 +6433,11 @@ struct sw_bd {
#define ST_MICRO_FLASH_PAGE_SIZE 256
#define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536
+#define BCM5709_FLASH_PAGE_BITS 8
+#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE 256
+
#define NVRAM_TIMEOUT_COUNT 30000
@@ -6449,7 +6454,10 @@ struct flash_spec {
u32 config2;
u32 config3;
u32 write1;
- u32 buffered;
+ u32 flags;
+#define BNX2_NV_BUFFERED 0x00000001
+#define BNX2_NV_TRANSLATE 0x00000002
+#define BNX2_NV_WREN 0x00000004
u32 page_bits;
u32 page_size;
u32 addr_mask;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 84aa2117c0e..355c6cf3d11 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc)
sprintf(portarg, "%ld", bc->pdev->port->base);
printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg);
- return call_usermodehelper(eppconfig_path, argv, envp, 1);
+ return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC);
}
/* ---------------------------------------------------------------------- */
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5891a0fbdc8..f87176055d0 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -824,6 +824,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
struct pppol2tp_session *session;
struct pppol2tp_tunnel *tunnel;
struct udphdr *uh;
+ unsigned int len;
error = -ENOTCONN;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -912,14 +913,15 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
}
/* Queue the packet to IP for output */
+ len = skb->len;
error = ip_queue_xmit(skb, 1);
/* Update stats */
if (error >= 0) {
tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += skb->len;
+ tunnel->stats.tx_bytes += len;
session->stats.tx_packets++;
- session->stats.tx_bytes += skb->len;
+ session->stats.tx_bytes += len;
} else {
tunnel->stats.tx_errors++;
session->stats.tx_errors++;
@@ -958,6 +960,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
__wsum csum = 0;
struct sk_buff *skb2 = NULL;
struct udphdr *uh;
+ unsigned int len;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
@@ -1046,18 +1049,25 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
printk("\n");
}
+ memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
+ IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ nf_reset(skb2);
+
/* Get routing info from the tunnel socket */
+ dst_release(skb2->dst);
skb2->dst = sk_dst_get(sk_tun);
/* Queue the packet to IP for output */
+ len = skb2->len;
rc = ip_queue_xmit(skb2, 1);
/* Update stats */
if (rc >= 0) {
tunnel->stats.tx_packets++;
- tunnel->stats.tx_bytes += skb2->len;
+ tunnel->stats.tx_bytes += len;
session->stats.tx_packets++;
- session->stats.tx_bytes += skb2->len;
+ session->stats.tx_bytes += len;
} else {
tunnel->stats.tx_errors++;
session->stats.tx_errors++;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 8a667c13fae..b801e3b3a11 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -12,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
+#include <linux/mutex.h>
#include <asm/vio.h>
#include <asm/ldc.h>
@@ -497,6 +498,8 @@ static void vnet_event(void *arg, int event)
vio_link_state_change(vio, event);
spin_unlock_irqrestore(&vio->lock, flags);
+ if (event == LDC_EVENT_RESET)
+ vio_port_up(vio);
return;
}
@@ -875,6 +878,115 @@ err_out:
return err;
}
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+ struct net_device *dev;
+ struct vnet *vp;
+ int err, i;
+
+ dev = alloc_etherdev(sizeof(*vp));
+ if (!dev) {
+ printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ vp = netdev_priv(dev);
+
+ spin_lock_init(&vp->lock);
+ vp->dev = dev;
+
+ INIT_LIST_HEAD(&vp->port_list);
+ for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&vp->port_hash[i]);
+ INIT_LIST_HEAD(&vp->list);
+ vp->local_mac = *local_mac;
+
+ dev->open = vnet_open;
+ dev->stop = vnet_close;
+ dev->set_multicast_list = vnet_set_rx_mode;
+ dev->set_mac_address = vnet_set_mac_addr;
+ dev->tx_timeout = vnet_tx_timeout;
+ dev->ethtool_ops = &vnet_ethtool_ops;
+ dev->watchdog_timeo = VNET_TX_TIMEOUT;
+ dev->change_mtu = vnet_change_mtu;
+ dev->hard_start_xmit = vnet_start_xmit;
+
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR PFX "Cannot register net device, "
+ "aborting.\n");
+ goto err_out_free_dev;
+ }
+
+ printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
+
+ for (i = 0; i < 6; i++)
+ printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+
+ list_add(&vp->list, &vnet_list);
+
+ return vp;
+
+err_out_free_dev:
+ free_netdev(dev);
+
+ return ERR_PTR(err);
+}
+
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+ struct vnet *iter, *vp;
+
+ mutex_lock(&vnet_list_mutex);
+ vp = NULL;
+ list_for_each_entry(iter, &vnet_list, list) {
+ if (iter->local_mac == *local_mac) {
+ vp = iter;
+ break;
+ }
+ }
+ if (!vp)
+ vp = vnet_new(local_mac);
+ mutex_unlock(&vnet_list_mutex);
+
+ return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+ u64 port_node)
+{
+ const u64 *local_mac = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+ u64 target = mdesc_arc_target(hp, a);
+ const char *name;
+
+ name = mdesc_get_property(hp, target, "name", NULL);
+ if (!name || strcmp(name, "network"))
+ continue;
+
+ local_mac = mdesc_get_property(hp, target,
+ local_mac_prop, NULL);
+ if (local_mac)
+ break;
+ }
+ if (!local_mac)
+ return ERR_PTR(-ENODEV);
+
+ return vnet_find_or_create(local_mac);
+}
+
static struct ldc_channel_config vnet_ldc_cfg = {
.event = vnet_event,
.mtu = 64,
@@ -887,6 +999,14 @@ static struct vio_driver_ops vnet_vio_ops = {
.handshake_complete = vnet_handshake_complete,
};
+static void print_version(void)
+{
+ static int version_printed;
+
+ if (version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+
const char *remote_macaddr_prop = "remote-mac-address";
static int __devinit vnet_port_probe(struct vio_dev *vdev,
@@ -899,14 +1019,17 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
const u64 *rmac;
int len, i, err, switch_port;
- vp = dev_get_drvdata(vdev->dev.parent);
- if (!vp) {
- printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
- return -ENODEV;
- }
+ print_version();
hp = mdesc_grab();
+ vp = vnet_find_parent(hp, vdev->mp);
+ if (IS_ERR(vp)) {
+ printk(KERN_ERR PFX "Cannot find port parent vnet.\n");
+ err = PTR_ERR(vp);
+ goto err_out_put_mdesc;
+ }
+
rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
err = -ENODEV;
if (!rmac) {
@@ -1025,139 +1148,14 @@ static struct vio_driver vnet_port_driver = {
}
};
-const char *local_mac_prop = "local-mac-address";
-
-static int __devinit vnet_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
-{
- static int vnet_version_printed;
- struct mdesc_handle *hp;
- struct net_device *dev;
- struct vnet *vp;
- const u64 *mac;
- int err, i, len;
-
- if (vnet_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
- hp = mdesc_grab();
-
- mac = mdesc_get_property(hp, vdev->mp, local_mac_prop, &len);
- if (!mac) {
- printk(KERN_ERR PFX "vnet lacks %s property.\n",
- local_mac_prop);
- err = -ENODEV;
- goto err_out;
- }
-
- dev = alloc_etherdev(sizeof(*vp));
- if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = (*mac >> (5 - i) * 8) & 0xff;
-
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- SET_NETDEV_DEV(dev, &vdev->dev);
-
- vp = netdev_priv(dev);
-
- spin_lock_init(&vp->lock);
- vp->dev = dev;
- vp->vdev = vdev;
-
- INIT_LIST_HEAD(&vp->port_list);
- for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
- INIT_HLIST_HEAD(&vp->port_hash[i]);
-
- dev->open = vnet_open;
- dev->stop = vnet_close;
- dev->set_multicast_list = vnet_set_rx_mode;
- dev->set_mac_address = vnet_set_mac_addr;
- dev->tx_timeout = vnet_tx_timeout;
- dev->ethtool_ops = &vnet_ethtool_ops;
- dev->watchdog_timeo = VNET_TX_TIMEOUT;
- dev->change_mtu = vnet_change_mtu;
- dev->hard_start_xmit = vnet_start_xmit;
-
- err = register_netdev(dev);
- if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
- goto err_out_free_dev;
- }
-
- printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
-
- dev_set_drvdata(&vdev->dev, vp);
-
- mdesc_release(hp);
-
- return 0;
-
-err_out_free_dev:
- free_netdev(dev);
-
-err_out:
- mdesc_release(hp);
- return err;
-}
-
-static int vnet_remove(struct vio_dev *vdev)
-{
-
- struct vnet *vp = dev_get_drvdata(&vdev->dev);
-
- if (vp) {
- /* XXX unregister port, or at least check XXX */
- unregister_netdevice(vp->dev);
- dev_set_drvdata(&vdev->dev, NULL);
- }
- return 0;
-}
-
-static struct vio_device_id vnet_match[] = {
- {
- .type = "network",
- },
- {},
-};
-MODULE_DEVICE_TABLE(vio, vnet_match);
-
-static struct vio_driver vnet_driver = {
- .id_table = vnet_match,
- .probe = vnet_probe,
- .remove = vnet_remove,
- .driver = {
- .name = "vnet",
- .owner = THIS_MODULE,
- }
-};
-
static int __init vnet_init(void)
{
- int err = vio_register_driver(&vnet_driver);
-
- if (!err) {
- err = vio_register_driver(&vnet_port_driver);
- if (err)
- vio_unregister_driver(&vnet_driver);
- }
-
- return err;
+ return vio_register_driver(&vnet_port_driver);
}
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
- vio_unregister_driver(&vnet_driver);
}
module_init(vnet_init);
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
index 1c887302d46..7d3a0cac727 100644
--- a/drivers/net/sunvnet.h
+++ b/drivers/net/sunvnet.h
@@ -60,11 +60,13 @@ struct vnet {
struct net_device *dev;
u32 msg_enable;
- struct vio_dev *vdev;
struct list_head port_list;
struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
+
+ struct list_head list;
+ u64 local_mac;
};
#endif /* _SUNVNET_H */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
new file mode 100644
index 00000000000..489f69c5d6c
--- /dev/null
+++ b/drivers/net/xen-netfront.c
@@ -0,0 +1,1863 @@
+/*
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2005, K A Fraser
+ * Copyright (c) 2005, XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <net/ip.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <xen/interface/io/netif.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/grant_table.h>
+
+static struct ethtool_ops xennet_ethtool_ops;
+
+struct netfront_cb {
+ struct page *page;
+ unsigned offset;
+};
+
+#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
+
+#define RX_COPY_THRESHOLD 256
+
+#define GRANT_INVALID_REF 0
+
+#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
+#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+
+struct netfront_info {
+ struct list_head list;
+ struct net_device *netdev;
+
+ struct net_device_stats stats;
+
+ struct xen_netif_tx_front_ring tx;
+ struct xen_netif_rx_front_ring rx;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ unsigned int evtchn;
+
+ /* Receive-ring batched refills. */
+#define RX_MIN_TARGET 8
+#define RX_DFL_MIN_TARGET 64
+#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+ unsigned rx_min_target, rx_max_target, rx_target;
+ struct sk_buff_head rx_batch;
+
+ struct timer_list rx_refill_timer;
+
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
+ * are linked from tx_skb_freelist through skb_entry.link.
+ *
+ * NB. Freelist index entries are always going to be less than
+ * PAGE_OFFSET, whereas pointers to skbs will always be equal to or
+ * greater than PAGE_OFFSET: we use this property to distinguish
+ * them.
+ */
+ union skb_entry {
+ struct sk_buff *skb;
+ unsigned link;
+ } tx_skbs[NET_TX_RING_SIZE];
+ grant_ref_t gref_tx_head;
+ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+ unsigned tx_skb_freelist;
+
+ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
+ grant_ref_t gref_rx_head;
+ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+ struct xenbus_device *xbdev;
+ int tx_ring_ref;
+ int rx_ring_ref;
+
+ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
+ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
+ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+};
+
+struct netfront_rx_info {
+ struct xen_netif_rx_response rx;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
+};
+
+/*
+ * Access macros for acquiring and freeing slots in tx_skbs[].
+ */
+
+static void add_id_to_freelist(unsigned *head, union skb_entry *list,
+ unsigned short id)
+{
+ list[id].link = *head;
+ *head = id;
+}
+
+static unsigned short get_id_from_freelist(unsigned *head,
+ union skb_entry *list)
+{
+ unsigned int id = *head;
+ *head = list[id].link;
+ return id;
+}
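
The tx_skbs[] comment above relies on the trick that a slot can hold either an skb pointer or a free-list link, distinguished by comparison with PAGE_OFFSET. As a stand-alone illustration (not part of the patch; the toy ring below is not the driver's data structure), the two helpers reduce to a simple push/pop on the union:

/* Toy model of the tx_skbs[] id freelist; illustrative names only. */
#include <assert.h>

#define TOY_RING_SIZE 8

union toy_entry {
	void *skb;       /* slot in use: points at the skb */
	unsigned link;   /* slot free: index of the next free slot */
};

int main(void)
{
	union toy_entry ring[TOY_RING_SIZE];
	unsigned head = 0, i, id;

	/* Chain every slot into one free list. */
	for (i = 0; i < TOY_RING_SIZE; i++)
		ring[i].link = i + 1;

	/* get_id_from_freelist(): pop the head index. */
	id = head;
	head = ring[id].link;
	assert(id == 0 && head == 1);

	/* add_id_to_freelist(): push the index back. */
	ring[id].link = head;
	head = id;
	assert(head == 0);

	return 0;
}
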
+
+static int xennet_rxidx(RING_IDX idx)
+{
+ return idx & (NET_RX_RING_SIZE - 1);
+}
+
+static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
+ RING_IDX ri)
+{
+ int i = xennet_rxidx(ri);
+ struct sk_buff *skb = np->rx_skbs[i];
+ np->rx_skbs[i] = NULL;
+ return skb;
+}
+
+static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
+ RING_IDX ri)
+{
+ int i = xennet_rxidx(ri);
+ grant_ref_t ref = np->grant_rx_ref[i];
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ return ref;
+}
+
+#ifdef CONFIG_SYSFS
+static int xennet_sysfs_addif(struct net_device *netdev);
+static void xennet_sysfs_delif(struct net_device *netdev);
+#else /* !CONFIG_SYSFS */
+#define xennet_sysfs_addif(dev) (0)
+#define xennet_sysfs_delif(dev) do { } while (0)
+#endif
+
+static int xennet_can_sg(struct net_device *dev)
+{
+ return dev->features & NETIF_F_SG;
+}
+
+
+static void rx_refill_timeout(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ netif_rx_schedule(dev);
+}
+
+static int netfront_tx_slot_available(struct netfront_info *np)
+{
+ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
+ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
+}
+
+static void xennet_maybe_wake_tx(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ if (unlikely(netif_queue_stopped(dev)) &&
+ netfront_tx_slot_available(np) &&
+ likely(netif_running(dev)))
+ netif_wake_queue(dev);
+}
+
+static void xennet_alloc_rx_buffers(struct net_device *dev)
+{
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ int i, batch_target, notify;
+ RING_IDX req_prod = np->rx.req_prod_pvt;
+ struct xen_memory_reservation reservation;
+ grant_ref_t ref;
+ unsigned long pfn;
+ void *vaddr;
+ int nr_flips;
+ struct xen_netif_rx_request *req;
+
+ if (unlikely(!netif_carrier_ok(dev)))
+ return;
+
+ /*
+ * Allocate skbuffs greedily, even though we batch updates to the
+ * receive ring. This creates a less bursty demand on the memory
+ * allocator, so should reduce the chance of failed allocation requests
+ * both for ourself and for other kernel subsystems.
+ */
+ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ goto no_skb;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page) {
+ kfree_skb(skb);
+no_skb:
+ /* Any skbuffs queued for refill? Force them out. */
+ if (i != 0)
+ goto refill;
+ /* Could not allocate any skbuffs. Try again later. */
+ mod_timer(&np->rx_refill_timer,
+ jiffies + (HZ/10));
+ break;
+ }
+
+ skb_shinfo(skb)->frags[0].page = page;
+ skb_shinfo(skb)->nr_frags = 1;
+ __skb_queue_tail(&np->rx_batch, skb);
+ }
+
+ /* Is the batch large enough to be worthwhile? */
+ if (i < (np->rx_target/2)) {
+ if (req_prod > np->rx.sring->req_prod)
+ goto push;
+ return;
+ }
+
+ /* Adjust our fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > np->rx_max_target))
+ np->rx_target = np->rx_max_target;
+
+ refill:
+ for (nr_flips = i = 0; ; i++) {
+ skb = __skb_dequeue(&np->rx_batch);
+ if (skb == NULL)
+ break;
+
+ skb->dev = dev;
+
+ id = xennet_rxidx(req_prod + i);
+
+ BUG_ON(np->rx_skbs[id]);
+ np->rx_skbs[id] = skb;
+
+ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
+ BUG_ON((signed short)ref < 0);
+ np->grant_rx_ref[id] = ref;
+
+ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
+ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
+
+ req = RING_GET_REQUEST(&np->rx, req_prod + i);
+ gnttab_grant_foreign_access_ref(ref,
+ np->xbdev->otherend_id,
+ pfn_to_mfn(pfn),
+ 0);
+
+ req->id = id;
+ req->gref = ref;
+ }
+
+ if (nr_flips != 0) {
+ reservation.extent_start = np->rx_pfn_array;
+ reservation.nr_extents = nr_flips;
+ reservation.extent_order = 0;
+ reservation.address_bits = 0;
+ reservation.domid = DOMID_SELF;
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* After all PTEs have been zapped, flush the TLB. */
+ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
+ UVMF_TLB_FLUSH|UVMF_ALL;
+
+ /* Give away a batch of pages. */
+ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
+ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+ /* Zap PTEs and give away pages in one big
+ * multicall. */
+ (void)HYPERVISOR_multicall(np->rx_mcl, i+1);
+
+ /* Check return status of HYPERVISOR_memory_op(). */
+ if (unlikely(np->rx_mcl[i].result != i))
+ panic("Unable to reduce memory reservation\n");
+ } else {
+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation) != i)
+ panic("Unable to reduce memory reservation\n");
+ }
+ } else {
+ wmb(); /* barrier so backend sees requests */
+ }
+
+ /* Above is a suitable barrier to ensure backend will see requests. */
+ np->rx.req_prod_pvt = req_prod + i;
+ push:
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
+ if (notify)
+ notify_remote_via_irq(np->netdev->irq);
+}
+
+static int xennet_open(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ spin_lock_bh(&np->rx_lock);
+ if (netif_carrier_ok(dev)) {
+ xennet_alloc_rx_buffers(dev);
+ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ netif_rx_schedule(dev);
+ }
+ spin_unlock_bh(&np->rx_lock);
+
+ xennet_maybe_wake_tx(dev);
+
+ return 0;
+}
+
+static void xennet_tx_buf_gc(struct net_device *dev)
+{
+ RING_IDX cons, prod;
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ BUG_ON(!netif_carrier_ok(dev));
+
+ do {
+ prod = np->tx.sring->rsp_prod;
+ rmb(); /* Ensure we see responses up to 'rp'. */
+
+ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
+ struct xen_netif_tx_response *txrsp;
+
+ txrsp = RING_GET_RESPONSE(&np->tx, cons);
+ if (txrsp->status == NETIF_RSP_NULL)
+ continue;
+
+ id = txrsp->id;
+ skb = np->tx_skbs[id].skb;
+ if (unlikely(gnttab_query_foreign_access(
+ np->grant_tx_ref[id]) != 0)) {
+ printk(KERN_ALERT "xennet_tx_buf_gc: warning "
+ "-- grant still in use by backend "
+ "domain.\n");
+ BUG();
+ }
+ gnttab_end_foreign_access_ref(
+ np->grant_tx_ref[id], GNTMAP_readonly);
+ gnttab_release_grant_reference(
+ &np->gref_tx_head, np->grant_tx_ref[id]);
+ np->grant_tx_ref[id] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+
+ np->tx.rsp_cons = prod;
+
+ /*
+ * Set a new event, then check for race with update of tx_cons.
+ * Note that it is essential to schedule a callback, no matter
+ * how few buffers are pending. Even if there is space in the
+ * transmit ring, higher layers may be blocked because too much
+ * data is outstanding: in such cases notification from Xen is
+ * likely to be the only kick that we'll get.
+ */
+ np->tx.sring->rsp_event =
+ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
+ mb(); /* update shared area */
+ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
+
+ xennet_maybe_wake_tx(dev);
+}
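As an illustration (not part of the patch), the rsp_event computation in xennet_tx_buf_gc() above re-arms the backend notification at roughly the midpoint of the still-outstanding requests, so the frontend is interrupted again only after about half of them have completed. A minimal user-space sketch of the arithmetic, with RING_IDX assumed to be the unsigned 32-bit index used by the shared-ring headers, so the subtraction stays correct across wrap-around:

#include <stdio.h>

typedef unsigned int RING_IDX;  /* unsigned 32-bit ring index, as in the shared-ring headers */

/* Mirrors: rsp_event = prod + ((req_prod - prod) >> 1) + 1 */
static RING_IDX next_tx_event(RING_IDX rsp_prod, RING_IDX req_prod)
{
        return rsp_prod + ((req_prod - rsp_prod) >> 1) + 1;
}

int main(void)
{
        /* 40 responses consumed, 60 requests submitted: ask to be
         * notified again after roughly 10 more responses. */
        printf("%u\n", next_tx_event(40, 60));            /* prints 51 */

        /* Indices wrap; unsigned subtraction still gives the distance. */
        printf("%#x\n", next_tx_event(0xfffffff0u, 10));  /* prints 0xfffffffe */
        return 0;
}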
+
+static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
+ struct xen_netif_tx_request *tx)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ char *data = skb->data;
+ unsigned long mfn;
+ RING_IDX prod = np->tx.req_prod_pvt;
+ int frags = skb_shinfo(skb)->nr_frags;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+ unsigned int id;
+ grant_ref_t ref;
+ int i;
+
+ /* While the header overlaps a page boundary (including being
+ larger than a page), split it into page-sized chunks. */
+ while (len > PAGE_SIZE - offset) {
+ tx->size = PAGE_SIZE - offset;
+ tx->flags |= NETTXF_more_data;
+ len -= tx->size;
+ data += tx->size;
+ offset = 0;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+ tx->flags = 0;
+ }
+
+ /* Grant backend access to each skb fragment page. */
+ for (i = 0; i < frags; i++) {
+ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+ tx->flags |= NETTXF_more_data;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb_get(skb);
+ tx = RING_GET_REQUEST(&np->tx, prod++);
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+
+ mfn = pfn_to_mfn(page_to_pfn(frag->page));
+ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
+ mfn, GNTMAP_readonly);
+
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = frag->page_offset;
+ tx->size = frag->size;
+ tx->flags = 0;
+ }
+
+ np->tx.req_prod_pvt = prod;
+}
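As a quick illustration (outside the patch itself) of the header-splitting loop above: because the linear data may start part-way into a page, it consumes ceil((offset + len) / PAGE_SIZE) transmit slots, which is exactly the estimate xennet_start_xmit() below adds to the fragment count before its "rides the rocket" check. A small user-space sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096u         /* assumed page size for illustration */

/* Number of page-sized chunks the skb header is split into,
 * mirroring the while-loop in xennet_make_frags(). */
static unsigned int header_slots(unsigned int offset, unsigned int len)
{
        unsigned int slots = 0;

        while (len > PAGE_SIZE - offset) {      /* chunk up to the end of the page */
                len -= PAGE_SIZE - offset;
                offset = 0;
                slots++;
        }
        return slots + 1;                       /* final (or only) chunk */
}

int main(void)
{
        /* A 6000-byte header starting 3000 bytes into a page splits into
         * 1096 + 4096 + 808 bytes, i.e. 3 slots, matching the estimate
         * (offset + len + PAGE_SIZE - 1) / PAGE_SIZE used before the
         * "rides the rocket" check. */
        printf("%u\n", header_slots(3000, 6000));
        printf("%u\n", (3000u + 6000u + PAGE_SIZE - 1) / PAGE_SIZE);
        return 0;
}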
+
+static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned short id;
+ struct netfront_info *np = netdev_priv(dev);
+ struct xen_netif_tx_request *tx;
+ struct xen_netif_extra_info *extra;
+ char *data = skb->data;
+ RING_IDX i;
+ grant_ref_t ref;
+ unsigned long mfn;
+ int notify;
+ int frags = skb_shinfo(skb)->nr_frags;
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+
+ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+ frags);
+ dump_stack();
+ goto drop;
+ }
+
+ spin_lock_irq(&np->tx_lock);
+
+ if (unlikely(!netif_carrier_ok(dev) ||
+ (frags > 1 && !xennet_can_sg(dev)) ||
+ netif_needs_gso(dev, skb))) {
+ spin_unlock_irq(&np->tx_lock);
+ goto drop;
+ }
+
+ i = np->tx.req_prod_pvt;
+
+ id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
+ np->tx_skbs[id].skb = skb;
+
+ tx = RING_GET_REQUEST(&np->tx, i);
+
+ tx->id = id;
+ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+ BUG_ON((signed short)ref < 0);
+ mfn = virt_to_mfn(data);
+ gnttab_grant_foreign_access_ref(
+ ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = offset;
+ tx->size = len;
+ extra = NULL;
+
+ tx->flags = 0;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ /* local packet? */
+ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
+ else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ /* remote but checksummed. */
+ tx->flags |= NETTXF_data_validated;
+
+ if (skb_shinfo(skb)->gso_size) {
+ struct xen_netif_extra_info *gso;
+
+ gso = (struct xen_netif_extra_info *)
+ RING_GET_REQUEST(&np->tx, ++i);
+
+ if (extra)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ tx->flags |= NETTXF_extra_info;
+
+ gso->u.gso.size = skb_shinfo(skb)->gso_size;
+ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
+ gso->u.gso.pad = 0;
+ gso->u.gso.features = 0;
+
+ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ gso->flags = 0;
+ extra = gso;
+ }
+
+ np->tx.req_prod_pvt = i + 1;
+
+ xennet_make_frags(skb, dev, tx);
+ tx->size = skb->len;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
+ if (notify)
+ notify_remote_via_irq(np->netdev->irq);
+
+ xennet_tx_buf_gc(dev);
+
+ if (!netfront_tx_slot_available(np))
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&np->tx_lock);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ return 0;
+
+ drop:
+ np->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static int xennet_close(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ netif_stop_queue(np->netdev);
+ return 0;
+}
+
+static struct net_device_stats *xennet_get_stats(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ return &np->stats;
+}
+
+static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
+ grant_ref_t ref)
+{
+ int new = xennet_rxidx(np->rx.req_prod_pvt);
+
+ BUG_ON(np->rx_skbs[new]);
+ np->rx_skbs[new] = skb;
+ np->grant_rx_ref[new] = ref;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
+ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
+ np->rx.req_prod_pvt++;
+}
+
+static int xennet_get_extras(struct netfront_info *np,
+ struct xen_netif_extra_info *extras,
+ RING_IDX rp)
+
+{
+ struct xen_netif_extra_info *extra;
+ struct device *dev = &np->netdev->dev;
+ RING_IDX cons = np->rx.rsp_cons;
+ int err = 0;
+
+ do {
+ struct sk_buff *skb;
+ grant_ref_t ref;
+
+ if (unlikely(cons + 1 == rp)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Missing extra info\n");
+ err = -EBADR;
+ break;
+ }
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&np->rx, ++cons);
+
+ if (unlikely(!extra->type ||
+ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Invalid extra type: %d\n",
+ extra->type);
+ err = -EINVAL;
+ } else {
+ memcpy(&extras[extra->type - 1], extra,
+ sizeof(*extra));
+ }
+
+ skb = xennet_get_rx_skb(np, cons);
+ ref = xennet_get_rx_ref(np, cons);
+ xennet_move_rx_slot(np, skb, ref);
+ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+
+ np->rx.rsp_cons = cons;
+ return err;
+}
+
+static int xennet_get_responses(struct netfront_info *np,
+ struct netfront_rx_info *rinfo, RING_IDX rp,
+ struct sk_buff_head *list)
+{
+ struct xen_netif_rx_response *rx = &rinfo->rx;
+ struct xen_netif_extra_info *extras = rinfo->extras;
+ struct device *dev = &np->netdev->dev;
+ RING_IDX cons = np->rx.rsp_cons;
+ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
+ grant_ref_t ref = xennet_get_rx_ref(np, cons);
+ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+ int frags = 1;
+ int err = 0;
+ unsigned long ret;
+
+ if (rx->flags & NETRXF_extra_info) {
+ err = xennet_get_extras(np, extras, rp);
+ cons = np->rx.rsp_cons;
+ }
+
+ for (;;) {
+ if (unlikely(rx->status < 0 ||
+ rx->offset + rx->status > PAGE_SIZE)) {
+ if (net_ratelimit())
+ dev_warn(dev, "rx->offset: %x, size: %u\n",
+ rx->offset, rx->status);
+ xennet_move_rx_slot(np, skb, ref);
+ err = -EINVAL;
+ goto next;
+ }
+
+ /*
+ * This definitely indicates a bug, either in this driver or in
+ * the backend driver. In the future this should flag the bad
+ * situation to the system controller so it can reboot the backend.
+ */
+ if (ref == GRANT_INVALID_REF) {
+ if (net_ratelimit())
+ dev_warn(dev, "Bad rx response id %d.\n",
+ rx->id);
+ err = -EINVAL;
+ goto next;
+ }
+
+ ret = gnttab_end_foreign_access_ref(ref, 0);
+ BUG_ON(!ret);
+
+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
+
+ __skb_queue_tail(list, skb);
+
+next:
+ if (!(rx->flags & NETRXF_more_data))
+ break;
+
+ if (cons + frags == rp) {
+ if (net_ratelimit())
+ dev_warn(dev, "Need more frags\n");
+ err = -ENOENT;
+ break;
+ }
+
+ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
+ skb = xennet_get_rx_skb(np, cons + frags);
+ ref = xennet_get_rx_ref(np, cons + frags);
+ frags++;
+ }
+
+ if (unlikely(frags > max)) {
+ if (net_ratelimit())
+ dev_warn(dev, "Too many frags\n");
+ err = -E2BIG;
+ }
+
+ if (unlikely(err))
+ np->rx.rsp_cons = cons + frags;
+
+ return err;
+}
+
+static int xennet_set_skb_gso(struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
+{
+ if (!gso->u.gso.size) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "GSO size must not be zero.\n");
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 segmentation offload is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
+ return -EINVAL;
+ }
+
+ skb_shinfo(skb)->gso_size = gso->u.gso.size;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+
+ return 0;
+}
+
+static RING_IDX xennet_fill_frags(struct netfront_info *np,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ RING_IDX cons = np->rx.rsp_cons;
+ skb_frag_t *frag = shinfo->frags + nr_frags;
+ struct sk_buff *nskb;
+
+ while ((nskb = __skb_dequeue(list))) {
+ struct xen_netif_rx_response *rx =
+ RING_GET_RESPONSE(&np->rx, ++cons);
+
+ frag->page = skb_shinfo(nskb)->frags[0].page;
+ frag->page_offset = rx->offset;
+ frag->size = rx->status;
+
+ skb->data_len += rx->status;
+
+ skb_shinfo(nskb)->nr_frags = 0;
+ kfree_skb(nskb);
+
+ frag++;
+ nr_frags++;
+ }
+
+ shinfo->nr_frags = nr_frags;
+ return cons;
+}
+
+static int skb_checksum_setup(struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ unsigned char *th;
+ int err = -EPROTO;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto out;
+
+ iph = (void *)skb->data;
+ th = skb->data + 4 * iph->ihl;
+ if (th >= skb_tail_pointer(skb))
+ goto out;
+
+ skb->csum_start = th - skb->head;
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ break;
+ case IPPROTO_UDP:
+ skb->csum_offset = offsetof(struct udphdr, check);
+ break;
+ default:
+ if (net_ratelimit())
+ printk(KERN_ERR "Attempting to checksum a non-"
+ "TCP/UDP packet, dropping a protocol"
+ " %d packet\n", iph->protocol);
+ goto out;
+ }
+
+ if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
+ goto out;
+
+ err = 0;
+
+out:
+ return err;
+}
+
+static int handle_incoming_queue(struct net_device *dev,
+ struct sk_buff_head *rxq)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int packets_dropped = 0;
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(rxq)) != NULL) {
+ struct page *page = NETFRONT_SKB_CB(skb)->page;
+ void *vaddr = page_address(page);
+ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
+
+ memcpy(skb->data, vaddr + offset,
+ skb_headlen(skb));
+
+ if (page != skb_shinfo(skb)->frags[0].page)
+ __free_page(page);
+
+ /* Ethernet work: delayed to here as it peeks at the header. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb_checksum_setup(skb)) {
+ kfree_skb(skb);
+ packets_dropped++;
+ np->stats.rx_errors++;
+ continue;
+ }
+ }
+
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += skb->len;
+
+ /* Pass it up. */
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ return packets_dropped;
+}
+
+static int xennet_poll(struct net_device *dev, int *pbudget)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct netfront_rx_info rinfo;
+ struct xen_netif_rx_response *rx = &rinfo.rx;
+ struct xen_netif_extra_info *extras = rinfo.extras;
+ RING_IDX i, rp;
+ int work_done, budget, more_to_do = 1;
+ struct sk_buff_head rxq;
+ struct sk_buff_head errq;
+ struct sk_buff_head tmpq;
+ unsigned long flags;
+ unsigned int len;
+ int err;
+
+ spin_lock(&np->rx_lock);
+
+ if (unlikely(!netif_carrier_ok(dev))) {
+ spin_unlock(&np->rx_lock);
+ return 0;
+ }
+
+ skb_queue_head_init(&rxq);
+ skb_queue_head_init(&errq);
+ skb_queue_head_init(&tmpq);
+
+ budget = *pbudget;
+ if (budget > dev->quota)
+ budget = dev->quota;
+ rp = np->rx.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ i = np->rx.rsp_cons;
+ work_done = 0;
+ while ((i != rp) && (work_done < budget)) {
+ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
+ memset(extras, 0, sizeof(rinfo.extras));
+
+ err = xennet_get_responses(np, &rinfo, rp, &tmpq);
+
+ if (unlikely(err)) {
+err:
+ while ((skb = __skb_dequeue(&tmpq)))
+ __skb_queue_tail(&errq, skb);
+ np->stats.rx_errors++;
+ i = np->rx.rsp_cons;
+ continue;
+ }
+
+ skb = __skb_dequeue(&tmpq);
+
+ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
+ struct xen_netif_extra_info *gso;
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (unlikely(xennet_set_skb_gso(skb, gso))) {
+ __skb_queue_head(&tmpq, skb);
+ np->rx.rsp_cons += skb_queue_len(&tmpq);
+ goto err;
+ }
+ }
+
+ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
+ NETFRONT_SKB_CB(skb)->offset = rx->offset;
+
+ len = rx->status;
+ if (len > RX_COPY_THRESHOLD)
+ len = RX_COPY_THRESHOLD;
+ skb_put(skb, len);
+
+ if (rx->status > len) {
+ skb_shinfo(skb)->frags[0].page_offset =
+ rx->offset + len;
+ skb_shinfo(skb)->frags[0].size = rx->status - len;
+ skb->data_len = rx->status - len;
+ } else {
+ skb_shinfo(skb)->frags[0].page = NULL;
+ skb_shinfo(skb)->nr_frags = 0;
+ }
+
+ i = xennet_fill_frags(np, skb, &tmpq);
+
+ /*
+ * Truesize approximates the size of true data plus
+ * any supervisor overheads. Adding hypervisor
+ * overheads has been shown to significantly reduce
+ * achievable bandwidth with the default receive
+ * buffer size. It is therefore not wise to account
+ * for it here.
+ *
+ * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
+ * to RX_COPY_THRESHOLD + the supervisor
+ * overheads. Here, we add the size of the data pulled
+ * in xennet_fill_frags().
+ *
+ * We also adjust for any unused space in the main
+ * data area by subtracting (RX_COPY_THRESHOLD -
+ * len). This is especially important with drivers
+ * which split incoming packets into header and data,
+ * using only 66 bytes of the main data area (see the
+ * e1000 driver for example.) On such systems,
+ * without this last adjustment, our achievable
+ * receive throughput using the standard receive
+ * buffer size was cut by 25%(!!!).
+ */
+ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+ skb->len += skb->data_len;
+
+ if (rx->flags & NETRXF_csum_blank)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ else if (rx->flags & NETRXF_data_validated)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ __skb_queue_tail(&rxq, skb);
+
+ np->rx.rsp_cons = ++i;
+ work_done++;
+ }
+
+ while ((skb = __skb_dequeue(&errq)))
+ kfree_skb(skb);
+
+ work_done -= handle_incoming_queue(dev, &rxq);
+
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
+ ((3*np->rx_target) / 4)) &&
+ (--np->rx_target < np->rx_min_target))
+ np->rx_target = np->rx_min_target;
+
+ xennet_alloc_rx_buffers(dev);
+
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done < budget) {
+ local_irq_save(flags);
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
+ if (!more_to_do)
+ __netif_rx_complete(dev);
+
+ local_irq_restore(flags);
+ }
+
+ spin_unlock(&np->rx_lock);
+
+ return more_to_do;
+}
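The truesize comment inside xennet_poll() above is easier to follow with concrete numbers. The sketch below is illustrative only and assumes RX_COPY_THRESHOLD is 256 bytes; the macro is defined earlier in this file and its exact value is an assumption here:

#include <stdio.h>

#define RX_COPY_THRESHOLD 256u  /* assumed value; the real define lives earlier in the driver */

/* Extra bytes credited to skb->truesize once fragments are attached,
 * mirroring: skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); */
static unsigned int truesize_delta(unsigned int len, unsigned int data_len)
{
        return data_len - (RX_COPY_THRESHOLD - len);
}

int main(void)
{
        /* Backend split a 1500-byte packet e1000-style: 66 bytes were copied
         * into the linear area (len), 1434 bytes stayed in the fragment
         * (data_len). Only 1434 - (256 - 66) = 1244 bytes are added, which
         * credits back the 190 unused bytes of the pre-allocated copy area. */
        printf("%u\n", truesize_delta(66, 1434));
        return 0;
}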
+
+static int xennet_change_mtu(struct net_device *dev, int mtu)
+{
+ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+ dev->mtu = mtu;
+ return 0;
+}
+
+static void xennet_release_tx_bufs(struct netfront_info *np)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ /* Skip over entries which are actually freelist references */
+ if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
+ continue;
+
+ skb = np->tx_skbs[i].skb;
+ gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
+ GNTMAP_readonly);
+ gnttab_release_grant_reference(&np->gref_tx_head,
+ np->grant_tx_ref[i]);
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
+ dev_kfree_skb_irq(skb);
+ }
+}
+
+static void xennet_release_rx_bufs(struct netfront_info *np)
+{
+ struct mmu_update *mmu = np->rx_mmu;
+ struct multicall_entry *mcl = np->rx_mcl;
+ struct sk_buff_head free_list;
+ struct sk_buff *skb;
+ unsigned long mfn;
+ int xfer = 0, noxfer = 0, unused = 0;
+ int id, ref;
+
+ dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
+ __func__);
+ return;
+
+ skb_queue_head_init(&free_list);
+
+ spin_lock_bh(&np->rx_lock);
+
+ for (id = 0; id < NET_RX_RING_SIZE; id++) {
+ ref = np->grant_rx_ref[id];
+ if (ref == GRANT_INVALID_REF) {
+ unused++;
+ continue;
+ }
+
+ skb = np->rx_skbs[id];
+ mfn = gnttab_end_foreign_transfer_ref(ref);
+ gnttab_release_grant_reference(&np->gref_rx_head, ref);
+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+ if (0 == mfn) {
+ skb_shinfo(skb)->nr_frags = 0;
+ dev_kfree_skb(skb);
+ noxfer++;
+ continue;
+ }
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Remap the page. */
+ struct page *page = skb_shinfo(skb)->frags[0].page;
+ unsigned long pfn = page_to_pfn(page);
+ void *vaddr = page_address(page);
+
+ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
+ mfn_pte(mfn, PAGE_KERNEL),
+ 0);
+ mcl++;
+ mmu->ptr = ((u64)mfn << PAGE_SHIFT)
+ | MMU_MACHPHYS_UPDATE;
+ mmu->val = pfn;
+ mmu++;
+
+ set_phys_to_machine(pfn, mfn);
+ }
+ __skb_queue_tail(&free_list, skb);
+ xfer++;
+ }
+
+ dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
+ __func__, xfer, noxfer, unused);
+
+ if (xfer) {
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Do all the remapping work and M2P updates. */
+ MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
+ 0, DOMID_SELF);
+ mcl++;
+ HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+ }
+ }
+
+ while ((skb = __skb_dequeue(&free_list)) != NULL)
+ dev_kfree_skb(skb);
+
+ spin_unlock_bh(&np->rx_lock);
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_release_tx_bufs(np);
+ xennet_release_rx_bufs(np);
+ gnttab_free_grant_references(np->gref_tx_head);
+ gnttab_free_grant_references(np->gref_rx_head);
+}
+
+static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
+{
+ int i, err;
+ struct net_device *netdev;
+ struct netfront_info *np;
+
+ netdev = alloc_etherdev(sizeof(struct netfront_info));
+ if (!netdev) {
+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np = netdev_priv(netdev);
+ np->xbdev = dev;
+
+ spin_lock_init(&np->tx_lock);
+ spin_lock_init(&np->rx_lock);
+
+ skb_queue_head_init(&np->rx_batch);
+ np->rx_target = RX_DFL_MIN_TARGET;
+ np->rx_min_target = RX_DFL_MIN_TARGET;
+ np->rx_max_target = RX_MAX_TARGET;
+
+ init_timer(&np->rx_refill_timer);
+ np->rx_refill_timer.data = (unsigned long)netdev;
+ np->rx_refill_timer.function = rx_refill_timeout;
+
+ /* Initialise tx_skbs as a free chain containing every entry. */
+ np->tx_skb_freelist = 0;
+ for (i = 0; i < NET_TX_RING_SIZE; i++) {
+ np->tx_skbs[i].link = i+1;
+ np->grant_tx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* Clear out rx_skbs */
+ for (i = 0; i < NET_RX_RING_SIZE; i++) {
+ np->rx_skbs[i] = NULL;
+ np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ }
+
+ /* A grant for every tx ring slot */
+ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
+ &np->gref_tx_head) < 0) {
+ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+ /* A grant for every rx ring slot */
+ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
+ &np->gref_rx_head) < 0) {
+ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
+ err = -ENOMEM;
+ goto exit_free_tx;
+ }
+
+ netdev->open = xennet_open;
+ netdev->hard_start_xmit = xennet_start_xmit;
+ netdev->stop = xennet_close;
+ netdev->get_stats = xennet_get_stats;
+ netdev->poll = xennet_poll;
+ netdev->uninit = xennet_uninit;
+ netdev->change_mtu = xennet_change_mtu;
+ netdev->weight = 64;
+ netdev->features = NETIF_F_IP_CSUM;
+
+ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
+ np->netdev = netdev;
+
+ netif_carrier_off(netdev);
+
+ return netdev;
+
+ exit_free_tx:
+ gnttab_free_grant_references(np->gref_tx_head);
+ exit:
+ free_netdev(netdev);
+ return ERR_PTR(err);
+}
+
+/**
+ * Entry point to this code when a new device is created. Allocate the basic
+ * structures and the ring buffers for communication with the backend, and
+ * inform the backend of the appropriate details for those.
+ */
+static int __devinit netfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err;
+ struct net_device *netdev;
+ struct netfront_info *info;
+
+ netdev = xennet_create_dev(dev);
+ if (IS_ERR(netdev)) {
+ err = PTR_ERR(netdev);
+ xenbus_dev_fatal(dev, err, "creating netdev");
+ return err;
+ }
+
+ info = netdev_priv(netdev);
+ dev->dev.driver_data = info;
+
+ err = register_netdev(info->netdev);
+ if (err) {
+ printk(KERN_WARNING "%s: register_netdev err=%d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ err = xennet_sysfs_addif(info->netdev);
+ if (err) {
+ unregister_netdev(info->netdev);
+ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ return 0;
+
+ fail:
+ free_netdev(netdev);
+ dev->dev.driver_data = NULL;
+ return err;
+}
+
+static void xennet_end_access(int ref, void *page)
+{
+ /* This frees the page as a side-effect */
+ if (ref != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+}
+
+static void xennet_disconnect_backend(struct netfront_info *info)
+{
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_bh(&info->rx_lock);
+ spin_lock_irq(&info->tx_lock);
+ netif_carrier_off(info->netdev);
+ spin_unlock_irq(&info->tx_lock);
+ spin_unlock_bh(&info->rx_lock);
+
+ if (info->netdev->irq)
+ unbind_from_irqhandler(info->netdev->irq, info->netdev);
+ info->evtchn = info->netdev->irq = 0;
+
+ /* End access and free the pages */
+ xennet_end_access(info->tx_ring_ref, info->tx.sring);
+ xennet_end_access(info->rx_ring_ref, info->rx.sring);
+
+ info->tx_ring_ref = GRANT_INVALID_REF;
+ info->rx_ring_ref = GRANT_INVALID_REF;
+ info->tx.sring = NULL;
+ info->rx.sring = NULL;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart. We tear down our netif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to the
+ * rest of the kernel.
+ */
+static int netfront_resume(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ xennet_disconnect_backend(info);
+ return 0;
+}
+
+static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
+{
+ char *s, *e, *macstr;
+ int i;
+
+ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
+ if (IS_ERR(macstr))
+ return PTR_ERR(macstr);
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac[i] = simple_strtoul(s, &e, 16);
+ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
+ kfree(macstr);
+ return -ENOENT;
+ }
+ s = e+1;
+ }
+
+ kfree(macstr);
+ return 0;
+}
+
+static irqreturn_t xennet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct netfront_info *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+
+ if (likely(netif_carrier_ok(dev))) {
+ xennet_tx_buf_gc(dev);
+ /* Under tx_lock: protects access to rx shared-ring indexes. */
+ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
+ netif_rx_schedule(dev);
+ }
+
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
+{
+ struct xen_netif_tx_sring *txs;
+ struct xen_netif_rx_sring *rxs;
+ int err;
+ struct net_device *netdev = info->netdev;
+
+ info->tx_ring_ref = GRANT_INVALID_REF;
+ info->rx_ring_ref = GRANT_INVALID_REF;
+ info->rx.sring = NULL;
+ info->tx.sring = NULL;
+ netdev->irq = 0;
+
+ err = xen_net_read_mac(dev, netdev->dev_addr);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
+ goto fail;
+ }
+
+ txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
+ if (!txs) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err, "allocating tx ring page");
+ goto fail;
+ }
+ SHARED_RING_INIT(txs);
+ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
+ if (err < 0) {
+ free_page((unsigned long)txs);
+ goto fail;
+ }
+
+ info->tx_ring_ref = err;
+ rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
+ if (!rxs) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err, "allocating rx ring page");
+ goto fail;
+ }
+ SHARED_RING_INIT(rxs);
+ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
+ if (err < 0) {
+ free_page((unsigned long)rxs);
+ goto fail;
+ }
+ info->rx_ring_ref = err;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err)
+ goto fail;
+
+ err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
+ IRQF_SAMPLE_RANDOM, netdev->name,
+ netdev);
+ if (err < 0)
+ goto fail;
+ netdev->irq = err;
+ return 0;
+
+ fail:
+ return err;
+}
+
+/* Common code used when first setting up, and when resuming. */
+static int talk_to_backend(struct xenbus_device *dev,
+ struct netfront_info *info)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+
+ /* Create shared ring, alloc event channel. */
+ err = setup_netfront(dev, info);
+ if (err)
+ goto out;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_ring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
+ info->tx_ring_ref);
+ if (err) {
+ message = "writing tx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
+ info->rx_ring_ref);
+ if (err) {
+ message = "writing rx ring-ref";
+ goto abort_transaction;
+ }
+ err = xenbus_printf(xbt, dev->nodename,
+ "event-channel", "%u", info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
+ 1);
+ if (err) {
+ message = "writing request-rx-copy";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
+ if (err) {
+ message = "writing feature-rx-notify";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
+ if (err) {
+ message = "writing feature-sg";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
+ if (err) {
+ message = "writing feature-gso-tcpv4";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_ring;
+ }
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+ destroy_ring:
+ xennet_disconnect_backend(info);
+ out:
+ return err;
+}
+
+static int xennet_set_sg(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+ "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ } else if (dev->mtu > ETH_DATA_LEN)
+ dev->mtu = ETH_DATA_LEN;
+
+ return ethtool_op_set_sg(dev, data);
+}
+
+static int xennet_set_tso(struct net_device *dev, u32 data)
+{
+ if (data) {
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv4", "%d", &val) < 0)
+ val = 0;
+ if (!val)
+ return -ENOSYS;
+ }
+
+ return ethtool_op_set_tso(dev, data);
+}
+
+static void xennet_set_features(struct net_device *dev)
+{
+ /* Turn off all GSO bits except ROBUST. */
+ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
+ dev->features |= NETIF_F_GSO_ROBUST;
+ xennet_set_sg(dev, 0);
+
+ /* We need checksum offload to enable scatter/gather and TSO. */
+ if (!(dev->features & NETIF_F_IP_CSUM))
+ return;
+
+ if (!xennet_set_sg(dev, 1))
+ xennet_set_tso(dev, 1);
+}
+
+static int xennet_connect(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int i, requeue_idx, err;
+ struct sk_buff *skb;
+ grant_ref_t ref;
+ struct xen_netif_rx_request *req;
+ unsigned int feature_rx_copy;
+
+ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-rx-copy", "%u", &feature_rx_copy);
+ if (err != 1)
+ feature_rx_copy = 0;
+
+ if (!feature_rx_copy) {
+ dev_info(&dev->dev,
+ "backend does not support copying receive path\n");
+ return -ENODEV;
+ }
+
+ err = talk_to_backend(np->xbdev, np);
+ if (err)
+ return err;
+
+ xennet_set_features(dev);
+
+ spin_lock_bh(&np->rx_lock);
+ spin_lock_irq(&np->tx_lock);
+
+ /* Step 1: Discard all pending TX packet fragments. */
+ xennet_release_tx_bufs(np);
+
+ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
+ if (!np->rx_skbs[i])
+ continue;
+
+ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
+ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
+ req = RING_GET_REQUEST(&np->rx, requeue_idx);
+
+ gnttab_grant_foreign_access_ref(
+ ref, np->xbdev->otherend_id,
+ pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
+ frags->page)),
+ 0);
+ req->gref = ref;
+ req->id = requeue_idx;
+
+ requeue_idx++;
+ }
+
+ np->rx.req_prod_pvt = requeue_idx;
+
+ /*
+ * Step 3: All public and private state should now be sane. Get
+ * ready to start sending and receiving packets and give the driver
+ * domain a kick because we've probably just requeued some
+ * packets.
+ */
+ netif_carrier_on(np->netdev);
+ notify_remote_via_irq(np->netdev->irq);
+ xennet_tx_buf_gc(dev);
+ xennet_alloc_rx_buffers(dev);
+
+ spin_unlock_irq(&np->tx_lock);
+ spin_unlock_bh(&np->rx_lock);
+
+ return 0;
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ struct netfront_info *np = dev->dev.driver_data;
+ struct net_device *netdev = np->netdev;
+
+ dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+
+ case XenbusStateInitWait:
+ if (dev->state != XenbusStateInitialising)
+ break;
+ if (xennet_connect(netdev) != 0)
+ break;
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ xenbus_frontend_closed(dev);
+ break;
+ }
+}
+
+static struct ethtool_ops xennet_ethtool_ops =
+{
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = xennet_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = xennet_set_tso,
+ .get_link = ethtool_op_get_link,
+};
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_rxbuf_min(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_min_target);
+}
+
+static ssize_t store_rxbuf_min(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target > np->rx_max_target)
+ np->rx_max_target = target;
+ np->rx_min_target = target;
+ if (target > np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_max_target);
+}
+
+static ssize_t store_rxbuf_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *np = netdev_priv(netdev);
+ char *endp;
+ unsigned long target;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ target = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EBADMSG;
+
+ if (target < RX_MIN_TARGET)
+ target = RX_MIN_TARGET;
+ if (target > RX_MAX_TARGET)
+ target = RX_MAX_TARGET;
+
+ spin_lock_bh(&np->rx_lock);
+ if (target < np->rx_min_target)
+ np->rx_min_target = target;
+ np->rx_max_target = target;
+ if (target < np->rx_target)
+ np->rx_target = target;
+
+ xennet_alloc_rx_buffers(netdev);
+
+ spin_unlock_bh(&np->rx_lock);
+ return len;
+}
+
+static ssize_t show_rxbuf_cur(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct netfront_info *info = netdev_priv(netdev);
+
+ return sprintf(buf, "%u\n", info->rx_target);
+}
+
+static struct device_attribute xennet_attrs[] = {
+ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
+ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
+ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
+};
+
+static int xennet_sysfs_addif(struct net_device *netdev)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
+ err = device_create_file(&netdev->dev,
+ &xennet_attrs[i]);
+ if (err)
+ goto fail;
+ }
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+ return err;
+}
+
+static void xennet_sysfs_delif(struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
+ device_remove_file(&netdev->dev, &xennet_attrs[i]);
+}
+
+#endif /* CONFIG_SYSFS */
+
+static struct xenbus_device_id netfront_ids[] = {
+ { "vif" },
+ { "" }
+};
+
+static int __devexit xennet_remove(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev->dev.driver_data;
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+ unregister_netdev(info->netdev);
+
+ xennet_disconnect_backend(info);
+
+ del_timer_sync(&info->rx_refill_timer);
+
+ xennet_sysfs_delif(info->netdev);
+
+ free_netdev(info->netdev);
+
+ return 0;
+}
+
+static struct xenbus_driver netfront = {
+ .name = "vif",
+ .owner = THIS_MODULE,
+ .ids = netfront_ids,
+ .probe = netfront_probe,
+ .remove = __devexit_p(xennet_remove),
+ .resume = netfront_resume,
+ .otherend_changed = backend_changed,
+};
+
+static int __init netif_init(void)
+{
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ if (is_initial_xendomain())
+ return 0;
+
+ printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
+
+ return xenbus_register_frontend(&netfront);
+}
+module_init(netif_init);
+
+static void __exit netif_exit(void)
+{
+ if (is_initial_xendomain())
+ return;
+
+ return xenbus_unregister_driver(&netfront);
+}
+module_exit(netif_exit);
+
+MODULE_DESCRIPTION("Xen virtual network device frontend");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 03baf1c64a2..ed112ee1601 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -147,7 +147,7 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
info->location_id, info->serial, info->capabilities);
envp[i] = NULL;
- value = call_usermodehelper (argv [0], argv, envp, 0);
+ value = call_usermodehelper (argv [0], argv, envp, UMH_WAIT_EXEC);
kfree (buf);
kfree (envp);
return 0;
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index a54e4140683..e821a155b65 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/kmod.h>
+#include <linux/reboot.h>
#include <asm/oplib.h>
#include <asm/ebus.h>
@@ -170,8 +171,6 @@ static void get_current_temps(struct bbc_cpu_temperature *tp)
static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
{
static int shutting_down = 0;
- static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
char *type = "???";
s8 val = -1;
@@ -195,7 +194,7 @@ static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n");
shutting_down = 1;
- if (call_usermodehelper("/sbin/shutdown", argv, envp, 0) < 0)
+ if (orderly_poweroff(true) < 0)
printk(KERN_CRIT "envctrl: shutdown execution failed\n");
}
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 8328acab47f..dadabef116b 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -26,6 +26,7 @@
#include <linux/ioport.h>
#include <linux/miscdevice.h>
#include <linux/kmod.h>
+#include <linux/reboot.h>
#include <asm/ebus.h>
#include <asm/uaccess.h>
@@ -966,10 +967,6 @@ static struct i2c_child_t *envctrl_get_i2c_child(unsigned char mon_type)
static void envctrl_do_shutdown(void)
{
static int inprog = 0;
- static char *envp[] = {
- "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
- char *argv[] = {
- "/sbin/shutdown", "-h", "now", NULL };
int ret;
if (inprog != 0)
@@ -977,7 +974,7 @@ static void envctrl_do_shutdown(void)
inprog = 1;
printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n");
- ret = call_usermodehelper("/sbin/shutdown", argv, envp, 0);
+ ret = orderly_poweroff(true);
if (ret < 0) {
printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n");
inprog = 0; /* unlikely to succeed, but we could try again */
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
new file mode 100644
index 00000000000..56592f0d6ce
--- /dev/null
+++ b/drivers/xen/Makefile
@@ -0,0 +1,2 @@
+obj-y += grant-table.o
+obj-y += xenbus/
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
new file mode 100644
index 00000000000..ea94dbabf9a
--- /dev/null
+++ b/drivers/xen/grant-table.c
@@ -0,0 +1,582 @@
+/******************************************************************************
+ * grant_table.c
+ *
+ * Granting foreign access to our memory reservation.
+ *
+ * Copyright (c) 2005-2006, Christopher Clark
+ * Copyright (c) 2004-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+
+#include <xen/interface/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+
+#include <asm/pgtable.h>
+#include <asm/sync_bitops.h>
+
+
+/* External tools reserve first few grant table entries. */
+#define NR_RESERVED_ENTRIES 8
+#define GNTTAB_LIST_END 0xffffffff
+#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+
+static grant_ref_t **gnttab_list;
+static unsigned int nr_grant_frames;
+static unsigned int boot_max_nr_grant_frames;
+static int gnttab_free_count;
+static grant_ref_t gnttab_free_head;
+static DEFINE_SPINLOCK(gnttab_list_lock);
+
+static struct grant_entry *shared;
+
+static struct gnttab_free_callback *gnttab_free_callback_list;
+
+static int gnttab_expand(unsigned int req_entries);
+
+#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+
+static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
+{
+ return &gnttab_list[(entry) / RPP][(entry) % RPP];
+}
+/* This can be used as an l-value */
+#define gnttab_entry(entry) (*__gnttab_entry(entry))
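As an aside (not part of the patch), gnttab_list is a two-level table: one page of grant_ref_t slots per list frame, indexed by entry / RPP and entry % RPP. A standalone sketch of that indexing, assuming 4 KiB pages and 4-byte grant references so that RPP is 1024:

#include <stdio.h>

typedef unsigned int grant_ref_t;                       /* 4 bytes, as in the Xen headers */

#define PAGE_SIZE 4096u                                 /* assumed for illustration */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))           /* refs per list page: 1024 here */

int main(void)
{
        grant_ref_t entry = 2500;

        /* Entry 2500 lives in list page 2, slot 452:
         * 2500 / 1024 == 2 and 2500 % 1024 == 452. */
        printf("page %zu, slot %zu\n",
               (size_t)(entry / RPP), (size_t)(entry % RPP));
        return 0;
}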
+
+static int get_free_entries(unsigned count)
+{
+ unsigned long flags;
+ int ref, rc;
+ grant_ref_t head;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+
+ if ((gnttab_free_count < count) &&
+ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ return rc;
+ }
+
+ ref = head = gnttab_free_head;
+ gnttab_free_count -= count;
+ while (count-- > 1)
+ head = gnttab_entry(head);
+ gnttab_free_head = gnttab_entry(head);
+ gnttab_entry(head) = GNTTAB_LIST_END;
+
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+
+ return ref;
+}
+
+static void do_free_callbacks(void)
+{
+ struct gnttab_free_callback *callback, *next;
+
+ callback = gnttab_free_callback_list;
+ gnttab_free_callback_list = NULL;
+
+ while (callback != NULL) {
+ next = callback->next;
+ if (gnttab_free_count >= callback->count) {
+ callback->next = NULL;
+ callback->fn(callback->arg);
+ } else {
+ callback->next = gnttab_free_callback_list;
+ gnttab_free_callback_list = callback;
+ }
+ callback = next;
+ }
+}
+
+static inline void check_free_callbacks(void)
+{
+ if (unlikely(gnttab_free_callback_list))
+ do_free_callbacks();
+}
+
+static void put_free_entry(grant_ref_t ref)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ gnttab_entry(ref) = gnttab_free_head;
+ gnttab_free_head = ref;
+ gnttab_free_count++;
+ check_free_callbacks();
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void update_grant_entry(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
+{
+ /*
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ */
+ shared[ref].frame = frame;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = flags;
+}
+
+/*
+ * Public grant-issuing interface functions
+ */
+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int readonly)
+{
+ update_grant_entry(ref, domid, frame,
+ GTF_permit_access | (readonly ? GTF_readonly : 0));
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
+
+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+ int readonly)
+{
+ int ref;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+ u16 nflags;
+
+ nflags = shared[ref].flags;
+
+ return (nflags & (GTF_reading|GTF_writing));
+}
+EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ u16 flags, nflags;
+
+ nflags = shared[ref].flags;
+ do {
+ flags = nflags;
+ if (flags & (GTF_reading|GTF_writing)) {
+ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ return 0;
+ }
+ } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+
+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+ unsigned long page)
+{
+ if (gnttab_end_foreign_access_ref(ref, readonly)) {
+ put_free_entry(ref);
+ if (page != 0)
+ free_page(page);
+ } else {
+ /* XXX This needs to be fixed so that the ref and page are
+ placed on a list to be freed up later. */
+ printk(KERN_WARNING
+ "WARNING: leaking g.e. and page still in use!\n");
+ }
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
+
+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
+{
+ int ref;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
+
+void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
+ unsigned long pfn)
+{
+ update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+
+ /*
+ * If the transfer has not even started yet, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = shared[ref].flags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = shared[ref].frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
+
+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
+{
+ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
+ put_free_entry(ref);
+ return frame;
+}
+EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
+
+void gnttab_free_grant_reference(grant_ref_t ref)
+{
+ put_free_entry(ref);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
+
+void gnttab_free_grant_references(grant_ref_t head)
+{
+ grant_ref_t ref;
+ unsigned long flags;
+ int count = 1;
+ if (head == GNTTAB_LIST_END)
+ return;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ ref = head;
+ while (gnttab_entry(ref) != GNTTAB_LIST_END) {
+ ref = gnttab_entry(ref);
+ count++;
+ }
+ gnttab_entry(ref) = gnttab_free_head;
+ gnttab_free_head = head;
+ gnttab_free_count += count;
+ check_free_callbacks();
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
+
+int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
+{
+ int h = get_free_entries(count);
+
+ if (h < 0)
+ return -ENOSPC;
+
+ *head = h;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
+
+int gnttab_empty_grant_references(const grant_ref_t *private_head)
+{
+ return (*private_head == GNTTAB_LIST_END);
+}
+EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
+
+int gnttab_claim_grant_reference(grant_ref_t *private_head)
+{
+ grant_ref_t g = *private_head;
+ if (unlikely(g == GNTTAB_LIST_END))
+ return -ENOSPC;
+ *private_head = gnttab_entry(g);
+ return g;
+}
+EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
+
+void gnttab_release_grant_reference(grant_ref_t *private_head,
+ grant_ref_t release)
+{
+ gnttab_entry(release) = *private_head;
+ *private_head = release;
+}
+EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
+
+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+ void (*fn)(void *), void *arg, u16 count)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ if (callback->next)
+ goto out;
+ callback->fn = fn;
+ callback->arg = arg;
+ callback->count = count;
+ callback->next = gnttab_free_callback_list;
+ gnttab_free_callback_list = callback;
+ check_free_callbacks();
+out:
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
+
+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
+{
+ struct gnttab_free_callback **pcb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
+ if (*pcb == callback) {
+ *pcb = callback->next;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
+
+static int grow_gnttab_list(unsigned int more_frames)
+{
+ unsigned int new_nr_grant_frames, extra_entries, i;
+
+ new_nr_grant_frames = nr_grant_frames + more_frames;
+ extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
+
+ for (i = nr_grant_frames; i < new_nr_grant_frames; i++) {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
+ if (!gnttab_list[i])
+ goto grow_nomem;
+ }
+
+ for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+ gnttab_entry(i) = i + 1;
+
+ gnttab_entry(i) = gnttab_free_head;
+ gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+ gnttab_free_count += extra_entries;
+
+ nr_grant_frames = new_nr_grant_frames;
+
+ check_free_callbacks();
+
+ return 0;
+
+grow_nomem:
+ for ( ; i >= nr_grant_frames; i--)
+ free_page((unsigned long) gnttab_list[i]);
+ return -ENOMEM;
+}
+
+static unsigned int __max_nr_grant_frames(void)
+{
+ struct gnttab_query_size query;
+ int rc;
+
+ query.dom = DOMID_SELF;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
+ if ((rc < 0) || (query.status != GNTST_okay))
+ return 4; /* Legacy max supported number of frames */
+
+ return query.max_nr_frames;
+}
+
+static inline unsigned int max_nr_grant_frames(void)
+{
+ unsigned int xen_max = __max_nr_grant_frames();
+
+ if (xen_max > boot_max_nr_grant_frames)
+ return boot_max_nr_grant_frames;
+ return xen_max;
+}
+
+static int map_pte_fn(pte_t *pte, struct page *pmd_page,
+ unsigned long addr, void *data)
+{
+ unsigned long **frames = (unsigned long **)data;
+
+ set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
+ (*frames)++;
+ return 0;
+}
+
+static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
+ unsigned long addr, void *data)
+{
+
+ set_pte_at(&init_mm, addr, pte, __pte(0));
+ return 0;
+}
+
+static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
+{
+ struct gnttab_setup_table setup;
+ unsigned long *frames;
+ unsigned int nr_gframes = end_idx + 1;
+ int rc;
+
+ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
+ if (!frames)
+ return -ENOMEM;
+
+ setup.dom = DOMID_SELF;
+ setup.nr_frames = nr_gframes;
+ setup.frame_list = frames;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
+ if (rc == -ENOSYS) {
+ kfree(frames);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || setup.status);
+
+ if (shared == NULL) {
+ struct vm_struct *area;
+ area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
+ BUG_ON(area == NULL);
+ shared = area->addr;
+ }
+ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
+ PAGE_SIZE * nr_gframes,
+ map_pte_fn, &frames);
+ BUG_ON(rc);
+ frames -= nr_gframes; /* adjust after map_pte_fn() */
+
+ kfree(frames);
+
+ return 0;
+}
+
+static int gnttab_resume(void)
+{
+ if (max_nr_grant_frames() < nr_grant_frames)
+ return -ENOSYS;
+ return gnttab_map(0, nr_grant_frames - 1);
+}
+
+static int gnttab_suspend(void)
+{
+ apply_to_page_range(&init_mm, (unsigned long)shared,
+ PAGE_SIZE * nr_grant_frames,
+ unmap_pte_fn, NULL);
+
+ return 0;
+}
+
+static int gnttab_expand(unsigned int req_entries)
+{
+ int rc;
+ unsigned int cur, extra;
+
+ cur = nr_grant_frames;
+ extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
+ GREFS_PER_GRANT_FRAME);
+ if (cur + extra > max_nr_grant_frames())
+ return -ENOSPC;
+
+ rc = gnttab_map(cur, cur + extra - 1);
+ if (rc == 0)
+ rc = grow_gnttab_list(extra);
+
+ return rc;
+}
+
+static int __devinit gnttab_init(void)
+{
+ int i;
+ unsigned int max_nr_glist_frames;
+ unsigned int nr_init_grefs;
+
+ if (!is_running_on_xen())
+ return -ENODEV;
+
+ nr_grant_frames = 1;
+ boot_max_nr_grant_frames = __max_nr_grant_frames();
+
+ /* Determine the maximum number of frames required for the
+ * grant reference free list on the current hypervisor.
+ */
+ max_nr_glist_frames = (boot_max_nr_grant_frames *
+ GREFS_PER_GRANT_FRAME /
+ (PAGE_SIZE / sizeof(grant_ref_t)));
+
+ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
+ GFP_KERNEL);
+ if (gnttab_list == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_grant_frames; i++) {
+ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
+ if (gnttab_list[i] == NULL)
+ goto ini_nomem;
+ }
+
+ if (gnttab_resume() < 0)
+ return -ENODEV;
+
+ nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+
+ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
+ gnttab_entry(i) = i + 1;
+
+ gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
+ gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
+ gnttab_free_head = NR_RESERVED_ENTRIES;
+
+ printk(KERN_INFO "Grant table initialized\n");
+ return 0;
+
+ ini_nomem:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)gnttab_list[i]);
+ kfree(gnttab_list);
+ return -ENOMEM;
+}
+
+core_initcall(gnttab_init);
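To make the free-list sizing in gnttab_init() above concrete, here is a small illustrative calculation, separate from the patch. It assumes 4 KiB pages, 8-byte grant-table entries and 4-byte grant references, which match the structures used in this file:

#include <stdio.h>

/* Assumed sizes for illustration: 4 KiB pages, 8-byte grant-table entries,
 * 4-byte grant references. */
#define PAGE_SIZE               4096u
#define GREFS_PER_GRANT_FRAME   (PAGE_SIZE / 8u)        /* 512 grant entries per frame */
#define REFS_PER_LIST_PAGE      (PAGE_SIZE / 4u)        /* 1024 free-list slots per page */

int main(void)
{
        unsigned int boot_max = 4;      /* legacy maximum from __max_nr_grant_frames() */
        unsigned int grefs = boot_max * GREFS_PER_GRANT_FRAME;
        unsigned int list_pages = grefs / REFS_PER_LIST_PAGE;

        /* 4 grant frames -> 2048 grant references -> 2 free-list pages,
         * i.e. max_nr_glist_frames works out to boot_max_nr_grant_frames / 2. */
        printf("%u grant refs, %u free-list pages\n", grefs, list_pages);
        return 0;
}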
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
new file mode 100644
index 00000000000..5571f5b8422
--- /dev/null
+++ b/drivers/xen/xenbus/Makefile
@@ -0,0 +1,7 @@
+obj-y += xenbus.o
+
+xenbus-objs =
+xenbus-objs += xenbus_client.o
+xenbus-objs += xenbus_comms.o
+xenbus-objs += xenbus_xs.o
+xenbus-objs += xenbus_probe.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
new file mode 100644
index 00000000000..9fd2f70ab46
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -0,0 +1,569 @@
+/******************************************************************************
+ * Client-facing interface for the Xenbus driver. In other words, the
+ * interface between the Xenbus and the device-specific code, be it the
+ * frontend or the backend of that driver.
+ *
+ * Copyright (C) 2005 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+
+const char *xenbus_strstate(enum xenbus_state state)
+{
+ static const char *const name[] = {
+ [ XenbusStateUnknown ] = "Unknown",
+ [ XenbusStateInitialising ] = "Initialising",
+ [ XenbusStateInitWait ] = "InitWait",
+ [ XenbusStateInitialised ] = "Initialised",
+ [ XenbusStateConnected ] = "Connected",
+ [ XenbusStateClosing ] = "Closing",
+ [ XenbusStateClosed ] = "Closed",
+ };
+ return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
+}
+EXPORT_SYMBOL_GPL(xenbus_strstate);
+
+/**
+ * xenbus_watch_path - register a watch
+ * @dev: xenbus device
+ * @path: path to watch
+ * @watch: watch to register
+ * @callback: callback to register
+ *
+ * Register a @watch on the given path, using the given xenbus_watch structure
+ * for storage, and the given @callback function as the callback. Return 0 on
+ * success, or -errno on error. On success, the given @path will be saved as
+ * @watch->node, and remains the caller's to free. On error, @watch->node will
+ * be NULL, the device will switch to %XenbusStateClosing, and the error will
+ * be saved in the store.
+ */
+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
+ struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int))
+{
+ int err;
+
+ watch->node = path;
+ watch->callback = callback;
+
+ err = register_xenbus_watch(watch);
+
+ if (err) {
+ watch->node = NULL;
+ watch->callback = NULL;
+ xenbus_dev_fatal(dev, err, "adding watch on %s", path);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_watch_path);
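+
+/*
+ * Illustrative sketch of a typical caller (the 'info' structure and the
+ * backend_changed() callback are hypothetical, not part of this interface):
+ *
+ *	static void backend_changed(struct xenbus_watch *watch,
+ *				    const char **vec, unsigned int len)
+ *	{
+ *		... react to the change under the watched node ...
+ *	}
+ *
+ *	err = xenbus_watch_path(dev, dev->otherend, &info->watch,
+ *				backend_changed);
+ *
+ * On failure xenbus_watch_path() has already called xenbus_dev_fatal(), so
+ * the caller only needs to propagate err.
+ */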
+
+
+/**
+ * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
+ * @dev: xenbus device
+ * @watch: watch to register
+ * @callback: callback to register
+ * @pathfmt: format of path to watch
+ *
+ * Register a watch on the path built from @pathfmt and its arguments, using
+ * the given xenbus_watch structure for storage, and the given @callback
+ * function as the callback. Return 0 on success, or -errno on error. On
+ * success, the formatted path will be saved as @watch->node, and becomes the
+ * caller's to kfree(). On error, watch->node will be NULL, so the caller has
+ * nothing to free, the device will switch to %XenbusStateClosing, and the
+ * error will be saved in the store.
+ */
+int xenbus_watch_pathfmt(struct xenbus_device *dev,
+ struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int),
+ const char *pathfmt, ...)
+{
+ int err;
+ va_list ap;
+ char *path;
+
+ va_start(ap, pathfmt);
+ path = kvasprintf(GFP_KERNEL, pathfmt, ap);
+ va_end(ap);
+
+ if (!path) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
+ return -ENOMEM;
+ }
+ err = xenbus_watch_path(dev, path, watch, callback);
+
+ if (err)
+ kfree(path);
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
+
+
+/**
+ * xenbus_switch_state
+ * @dev: xenbus device
+ * @state: new state
+ *
+ * Advertise in the store a change of the given driver to the given new @state.
+ * Return 0 on success, or -errno on error. On error, the device will switch
+ * to XenbusStateClosing, and the error will be saved in the store.
+ */
+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
+{
+ /* We check whether the state is currently set to the given value, and
+ if not, then the state is set. We don't want to unconditionally
+ write the given state, because we don't want to fire watches
+ unnecessarily. Furthermore, if the node has gone, we don't write
+ to it, as the device will be tearing down, and we don't want to
+ resurrect that directory.
+
+ Note that, because of this cached value of our state, this function
+	   will not work inside a Xenstore transaction (something it used
+	   to attempt) because dev->state would not get reset if the
+	   transaction was aborted.
+	 */
+
+ int current_state;
+ int err;
+
+ if (state == dev->state)
+ return 0;
+
+ err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
+ &current_state);
+ if (err != 1)
+ return 0;
+
+ err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
+ if (err) {
+ if (state != XenbusStateClosing) /* Avoid looping */
+ xenbus_dev_fatal(dev, err, "writing new state");
+ return err;
+ }
+
+ dev->state = state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_switch_state);
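+
+/*
+ * Illustrative sketch: a frontend that has finished setting up its shared
+ * rings typically advertises the fact with
+ *
+ *	xenbus_switch_state(dev, XenbusStateConnected);
+ *
+ * and later drives itself towards XenbusStateClosed during teardown.
+ */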
+
+int xenbus_frontend_closed(struct xenbus_device *dev)
+{
+ xenbus_switch_state(dev, XenbusStateClosed);
+ complete(&dev->down);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
+
+/**
+ * Return the path to the error node for the given device, or NULL on failure.
+ * If the value returned is non-NULL, then it is the caller's to kfree.
+ */
+static char *error_path(struct xenbus_device *dev)
+{
+ return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
+}
+
+
+static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
+ const char *fmt, va_list ap)
+{
+ int ret;
+ unsigned int len;
+ char *printf_buffer = NULL;
+ char *path_buffer = NULL;
+
+#define PRINTF_BUFFER_SIZE 4096
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ if (printf_buffer == NULL)
+ goto fail;
+
+ len = sprintf(printf_buffer, "%i ", -err);
+ ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
+
+ BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
+
+ dev_err(&dev->dev, "%s\n", printf_buffer);
+
+ path_buffer = error_path(dev);
+
+ if (path_buffer == NULL) {
+ dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+ dev->nodename, printf_buffer);
+ goto fail;
+ }
+
+ if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
+ dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
+ dev->nodename, printf_buffer);
+ goto fail;
+ }
+
+fail:
+ kfree(printf_buffer);
+ kfree(path_buffer);
+}
+
+
+/**
+ * xenbus_dev_error
+ * @dev: xenbus device
+ * @err: error to report
+ * @fmt: error message format
+ *
+ * Report the given negative errno into the store, along with the given
+ * formatted message.
+ */
+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xenbus_va_dev_error(dev, err, fmt, ap);
+ va_end(ap);
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_error);
+
+/**
+ * xenbus_dev_fatal
+ * @dev: xenbus device
+ * @err: error to report
+ * @fmt: error message format
+ *
+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
+ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
+ * closedown of this driver and its peer.
+ */
+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ xenbus_va_dev_error(dev, err, fmt, ap);
+ va_end(ap);
+
+ xenbus_switch_state(dev, XenbusStateClosing);
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
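+
+/*
+ * Illustrative sketch: a driver that fails to allocate a shared page during
+ * setup might report it with
+ *
+ *	xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring page");
+ *
+ * which logs the message, writes it under error/<nodename> in the store and
+ * begins an orderly close by switching the device to XenbusStateClosing.
+ */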
+
+/**
+ * xenbus_grant_ring
+ * @dev: xenbus device
+ * @ring_mfn: mfn of ring to grant
+ *
+ * Grant access to the given @ring_mfn to the peer of the given device.
+ * Return the grant reference on success (a non-negative value), or -errno on
+ * error. On error, the device will switch to XenbusStateClosing, and the
+ * error will be saved in the store.
+ */
+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+{
+ int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
+ if (err < 0)
+ xenbus_dev_fatal(dev, err, "granting access to ring page");
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_grant_ring);
+
+
+/**
+ * Allocate an event channel for the given xenbus_device, assigning the newly
+ * created local port to *port. Return 0 on success, or -errno on error. On
+ * error, the device will switch to XenbusStateClosing, and the error will be
+ * saved in the store.
+ */
+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+{
+ struct evtchn_alloc_unbound alloc_unbound;
+ int err;
+
+ alloc_unbound.dom = DOMID_SELF;
+ alloc_unbound.remote_dom = dev->otherend_id;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
+ &alloc_unbound);
+ if (err)
+ xenbus_dev_fatal(dev, err, "allocating event channel");
+ else
+ *port = alloc_unbound.port;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
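+
+/*
+ * Illustrative sketch of the usual frontend connection sequence (the 'info'
+ * structure and its fields are hypothetical):
+ *
+ *	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring));
+ *	if (err < 0)
+ *		goto fail;
+ *	info->ring_ref = err;
+ *
+ *	err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ *	if (err)
+ *		goto fail;
+ *
+ * The grant reference and event channel are then written to the store for
+ * the backend to pick up.
+ */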
+
+
+/**
+ * Bind to an existing interdomain event channel in another domain. Returns 0
+ * on success and stores the local port in *port. On error, returns -errno,
+ * switches the device to XenbusStateClosing, and saves the error in XenStore.
+ */
+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
+{
+ struct evtchn_bind_interdomain bind_interdomain;
+ int err;
+
+ bind_interdomain.remote_dom = dev->otherend_id;
+ bind_interdomain.remote_port = remote_port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
+ &bind_interdomain);
+ if (err)
+ xenbus_dev_fatal(dev, err,
+ "binding to event channel %d from domain %d",
+ remote_port, dev->otherend_id);
+ else
+ *port = bind_interdomain.local_port;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
+
+
+/**
+ * Free an existing event channel. Returns 0 on success or -errno on error.
+ */
+int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+{
+ struct evtchn_close close;
+ int err;
+
+ close.port = port;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
+ if (err)
+ xenbus_dev_error(dev, err, "freeing event channel %d", port);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
+
+
+/**
+ * xenbus_map_ring_valloc
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @vaddr: pointer to address to be filled out by mapping
+ *
+ * Based on Rusty Russell's skeleton driver's map_page.
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
+ * page to that address, and sets *vaddr to that address.
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
+ * or -ENOMEM on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the error message will be saved in XenStore.
+ */
+int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+{
+ struct gnttab_map_grant_ref op = {
+ .flags = GNTMAP_host_map,
+ .ref = gnt_ref,
+ .dom = dev->otherend_id,
+ };
+ struct vm_struct *area;
+
+ *vaddr = NULL;
+
+ area = alloc_vm_area(PAGE_SIZE);
+ if (!area)
+ return -ENOMEM;
+
+ op.host_addr = (unsigned long)area->addr;
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status != GNTST_okay) {
+ free_vm_area(area);
+ xenbus_dev_fatal(dev, op.status,
+ "mapping in shared page %d from domain %d",
+ gnt_ref, dev->otherend_id);
+ return op.status;
+ }
+
+ /* Stuff the handle in an unused field */
+ area->phys_addr = (unsigned long)op.handle;
+
+ *vaddr = area->addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
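+
+/*
+ * Illustrative sketch of the valloc/vfree pairing (gnt_ref would normally
+ * have been read from the store; 'ring' is a hypothetical local variable):
+ *
+ *	void *ring;
+ *
+ *	err = xenbus_map_ring_valloc(dev, gnt_ref, &ring);
+ *	if (err)
+ *		return err;
+ *	...
+ *	xenbus_unmap_ring_vfree(dev, ring);
+ */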
+
+
+/**
+ * xenbus_map_ring
+ * @dev: xenbus device
+ * @gnt_ref: grant reference
+ * @handle: pointer to grant handle to be filled
+ * @vaddr: address to be mapped to
+ *
+ * Map a page of memory into this domain from another domain's grant table.
+ * xenbus_map_ring does not allocate the virtual address space (you must do
+ * this yourself!). It only maps in the page to the specified address.
+ * Returns 0 on success, or a GNTST_* status code (see
+ * xen/include/interface/grant_table.h) on error. If an error is returned,
+ * the device will switch to XenbusStateClosing and the error message will be
+ * saved in XenStore.
+ */
+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
+ grant_handle_t *handle, void *vaddr)
+{
+ struct gnttab_map_grant_ref op = {
+ .host_addr = (unsigned long)vaddr,
+ .flags = GNTMAP_host_map,
+ .ref = gnt_ref,
+ .dom = dev->otherend_id,
+ };
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status != GNTST_okay) {
+ xenbus_dev_fatal(dev, op.status,
+ "mapping in shared page %d from domain %d",
+ gnt_ref, dev->otherend_id);
+ } else
+ *handle = op.handle;
+
+ return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring);
+
+
+/**
+ * xenbus_unmap_ring_vfree
+ * @dev: xenbus device
+ * @vaddr: addr to unmap
+ *
+ * Based on Rusty Russell's skeleton driver's unmap_page.
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
+ * xenbus_map_ring_valloc (it will free the virtual address space).
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
+{
+ struct vm_struct *area;
+ struct gnttab_unmap_grant_ref op = {
+ .host_addr = (unsigned long)vaddr,
+ };
+
+ /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
+ * method so that we don't have to muck with vmalloc internals here.
+ * We could force the user to hang on to their struct vm_struct from
+ * xenbus_map_ring_valloc, but these 6 lines considerably simplify
+ * this API.
+ */
+ read_lock(&vmlist_lock);
+ for (area = vmlist; area != NULL; area = area->next) {
+ if (area->addr == vaddr)
+ break;
+ }
+ read_unlock(&vmlist_lock);
+
+ if (!area) {
+ xenbus_dev_error(dev, -ENOENT,
+ "can't find mapped virtual address %p", vaddr);
+ return GNTST_bad_virt_addr;
+ }
+
+ op.handle = (grant_handle_t)area->phys_addr;
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status == GNTST_okay)
+ free_vm_area(area);
+ else
+ xenbus_dev_error(dev, op.status,
+ "unmapping page at handle %d error %d",
+ (int16_t)area->phys_addr, op.status);
+
+ return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+
+/**
+ * xenbus_unmap_ring
+ * @dev: xenbus device
+ * @handle: grant handle
+ * @vaddr: addr to unmap
+ *
+ * Unmap a page of memory in this domain that was imported from another domain.
+ * Returns 0 on success and returns GNTST_* on error
+ * (see xen/include/interface/grant_table.h).
+ */
+int xenbus_unmap_ring(struct xenbus_device *dev,
+ grant_handle_t handle, void *vaddr)
+{
+ struct gnttab_unmap_grant_ref op = {
+ .host_addr = (unsigned long)vaddr,
+ .handle = handle,
+ };
+
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+ BUG();
+
+ if (op.status != GNTST_okay)
+ xenbus_dev_error(dev, op.status,
+ "unmapping page at handle %d error %d",
+ handle, op.status);
+
+ return op.status;
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
+
+
+/**
+ * xenbus_read_driver_state
+ * @path: path for driver
+ *
+ * Return the state of the driver rooted at the given store path, or
+ * XenbusStateUnknown if no state can be read.
+ */
+enum xenbus_state xenbus_read_driver_state(const char *path)
+{
+ enum xenbus_state result;
+ int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
+ if (err)
+ result = XenbusStateUnknown;
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
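+
+/*
+ * Illustrative sketch: a frontend can inspect its peer's state with
+ *
+ *	enum xenbus_state be_state = xenbus_read_driver_state(dev->otherend);
+ *
+ * which yields XenbusStateUnknown if the backend's state node cannot be
+ * read, for instance because the backend directory has gone away.
+ */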
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
new file mode 100644
index 00000000000..6efbe3f29ca
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -0,0 +1,233 @@
+/******************************************************************************
+ * xenbus_comms.c
+ *
+ * Low level code to talk to the Xen Store: ringbuffer and event channel.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <xen/xenbus.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/events.h>
+#include <xen/page.h>
+#include "xenbus_comms.h"
+
+static int xenbus_irq;
+
+static DECLARE_WORK(probe_work, xenbus_probe);
+
+static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
+
+static irqreturn_t wake_waiting(int irq, void *unused)
+{
+ if (unlikely(xenstored_ready == 0)) {
+ xenstored_ready = 1;
+ schedule_work(&probe_work);
+ }
+
+ wake_up(&xb_waitq);
+ return IRQ_HANDLED;
+}
+
+static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
+{
+ return ((prod - cons) <= XENSTORE_RING_SIZE);
+}
+
+static void *get_output_chunk(XENSTORE_RING_IDX cons,
+ XENSTORE_RING_IDX prod,
+ char *buf, uint32_t *len)
+{
+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
+ if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
+ *len = XENSTORE_RING_SIZE - (prod - cons);
+ return buf + MASK_XENSTORE_IDX(prod);
+}
+
+static const void *get_input_chunk(XENSTORE_RING_IDX cons,
+ XENSTORE_RING_IDX prod,
+ const char *buf, uint32_t *len)
+{
+ *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
+ if ((prod - cons) < *len)
+ *len = prod - cons;
+ return buf + MASK_XENSTORE_IDX(cons);
+}
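+
+/*
+ * Worked example (illustrative, with XENSTORE_RING_SIZE == 1024): for
+ * cons == 1000 and prod == 1030, get_output_chunk() starts writing at ring
+ * offset MASK_XENSTORE_IDX(1030) == 6 and caps the chunk at
+ * 1024 - (1030 - 1000) == 994 bytes, i.e. at the space the reader has not
+ * consumed yet, even though 1018 contiguous bytes remain before the wrap.
+ */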
+
+/**
+ * xb_write - low level write
+ * @data: buffer to send
+ * @len: length of buffer
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int xb_write(const void *data, unsigned len)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ XENSTORE_RING_IDX cons, prod;
+ int rc;
+
+ while (len != 0) {
+ void *dst;
+ unsigned int avail;
+
+ rc = wait_event_interruptible(
+ xb_waitq,
+ (intf->req_prod - intf->req_cons) !=
+ XENSTORE_RING_SIZE);
+ if (rc < 0)
+ return rc;
+
+ /* Read indexes, then verify. */
+ cons = intf->req_cons;
+ prod = intf->req_prod;
+ if (!check_indexes(cons, prod)) {
+ intf->req_cons = intf->req_prod = 0;
+ return -EIO;
+ }
+
+ dst = get_output_chunk(cons, prod, intf->req, &avail);
+ if (avail == 0)
+ continue;
+ if (avail > len)
+ avail = len;
+
+ /* Must write data /after/ reading the consumer index. */
+ mb();
+
+ memcpy(dst, data, avail);
+ data += avail;
+ len -= avail;
+
+ /* Other side must not see new producer until data is there. */
+ wmb();
+ intf->req_prod += avail;
+
+ /* Implies mb(): other side will see the updated producer. */
+ notify_remote_via_evtchn(xen_store_evtchn);
+ }
+
+ return 0;
+}
+
+int xb_data_to_read(void)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ return (intf->rsp_cons != intf->rsp_prod);
+}
+
+int xb_wait_for_data_to_read(void)
+{
+ return wait_event_interruptible(xb_waitq, xb_data_to_read());
+}
+
+int xb_read(void *data, unsigned len)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ XENSTORE_RING_IDX cons, prod;
+ int rc;
+
+ while (len != 0) {
+ unsigned int avail;
+ const char *src;
+
+ rc = xb_wait_for_data_to_read();
+ if (rc < 0)
+ return rc;
+
+ /* Read indexes, then verify. */
+ cons = intf->rsp_cons;
+ prod = intf->rsp_prod;
+ if (!check_indexes(cons, prod)) {
+ intf->rsp_cons = intf->rsp_prod = 0;
+ return -EIO;
+ }
+
+ src = get_input_chunk(cons, prod, intf->rsp, &avail);
+ if (avail == 0)
+ continue;
+ if (avail > len)
+ avail = len;
+
+ /* Must read data /after/ reading the producer index. */
+ rmb();
+
+ memcpy(data, src, avail);
+ data += avail;
+ len -= avail;
+
+ /* Other side must not see free space until we've copied out */
+ mb();
+ intf->rsp_cons += avail;
+
+ pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);
+
+ /* Implies mb(): other side will see the updated consumer. */
+ notify_remote_via_evtchn(xen_store_evtchn);
+ }
+
+ return 0;
+}
+
+/**
+ * xb_init_comms - Set up the interrupt handler for the store event channel.
+ */
+int xb_init_comms(void)
+{
+ struct xenstore_domain_interface *intf = xen_store_interface;
+ int err;
+
+ if (intf->req_prod != intf->req_cons)
+ printk(KERN_ERR "XENBUS request ring is not quiescent "
+ "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);
+
+ if (intf->rsp_prod != intf->rsp_cons) {
+ printk(KERN_WARNING "XENBUS response ring is not quiescent "
+ "(%08x:%08x): fixing up\n",
+ intf->rsp_cons, intf->rsp_prod);
+ intf->rsp_cons = intf->rsp_prod;
+ }
+
+ if (xenbus_irq)
+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+
+ err = bind_evtchn_to_irqhandler(
+ xen_store_evtchn, wake_waiting,
+ 0, "xenbus", &xb_waitq);
+ if (err <= 0) {
+ printk(KERN_ERR "XENBUS request irq failed %i\n", err);
+ return err;
+ }
+
+ xenbus_irq = err;
+
+ return 0;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
new file mode 100644
index 00000000000..c21db751373
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -0,0 +1,46 @@
+/*
+ * Private include for xenbus communications.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_COMMS_H
+#define _XENBUS_COMMS_H
+
+int xs_init(void);
+int xb_init_comms(void);
+
+/* Low level routines. */
+int xb_write(const void *data, unsigned len);
+int xb_read(void *data, unsigned len);
+int xb_data_to_read(void);
+int xb_wait_for_data_to_read(void);
+int xs_input_avail(void);
+extern struct xenstore_domain_interface *xen_store_interface;
+extern int xen_store_evtchn;
+
+#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
new file mode 100644
index 00000000000..0b769f7c4a4
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -0,0 +1,935 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...) \
+ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
+ __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+int xen_store_evtchn;
+struct xenstore_domain_interface *xen_store_interface;
+static unsigned long xen_store_mfn;
+
+static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
+
+static void wait_for_devices(struct xenbus_driver *xendrv);
+
+static int xenbus_probe_frontend(const char *type, const char *name);
+
+static void xenbus_dev_shutdown(struct device *_dev);
+
+/* If something in array of ids matches this device, return it. */
+static const struct xenbus_device_id *
+match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
+{
+ for (; *arr->devicetype != '\0'; arr++) {
+ if (!strcmp(arr->devicetype, dev->devicetype))
+ return arr;
+ }
+ return NULL;
+}
+
+int xenbus_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct xenbus_driver *drv = to_xenbus_driver(_drv);
+
+ if (!drv->ids)
+ return 0;
+
+ return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
+}
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
+{
+ nodename = strchr(nodename, '/');
+ if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) {
+ printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+ return -EINVAL;
+ }
+
+ strlcpy(bus_id, nodename + 1, BUS_ID_SIZE);
+ if (!strchr(bus_id, '/')) {
+ printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+ return -EINVAL;
+ }
+ *strchr(bus_id, '/') = '-';
+ return 0;
+}
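+
+/*
+ * Illustrative example: the store node "device/vif/0" produces the bus id
+ * "vif-0".
+ */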
+
+
+static void free_otherend_details(struct xenbus_device *dev)
+{
+ kfree(dev->otherend);
+ dev->otherend = NULL;
+}
+
+
+static void free_otherend_watch(struct xenbus_device *dev)
+{
+ if (dev->otherend_watch.node) {
+ unregister_xenbus_watch(&dev->otherend_watch);
+ kfree(dev->otherend_watch.node);
+ dev->otherend_watch.node = NULL;
+ }
+}
+
+
+int read_otherend_details(struct xenbus_device *xendev,
+ char *id_node, char *path_node)
+{
+ int err = xenbus_gather(XBT_NIL, xendev->nodename,
+ id_node, "%i", &xendev->otherend_id,
+ path_node, NULL, &xendev->otherend,
+ NULL);
+ if (err) {
+ xenbus_dev_fatal(xendev, err,
+ "reading other end details from %s",
+ xendev->nodename);
+ return err;
+ }
+ if (strlen(xendev->otherend) == 0 ||
+ !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
+ xenbus_dev_fatal(xendev, -ENOENT,
+ "unable to read other end from %s. "
+ "missing or inaccessible.",
+ xendev->nodename);
+ free_otherend_details(xendev);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+ return read_otherend_details(xendev, "backend-id", "backend");
+}
+
+
+/* Bus type for frontend drivers. */
+static struct xen_bus_type xenbus_frontend = {
+ .root = "device",
+ .levels = 2, /* device/type/<id> */
+ .get_bus_id = frontend_bus_id,
+ .probe = xenbus_probe_frontend,
+ .bus = {
+ .name = "xen",
+ .match = xenbus_match,
+ .probe = xenbus_dev_probe,
+ .remove = xenbus_dev_remove,
+ .shutdown = xenbus_dev_shutdown,
+ },
+};
+
+static void otherend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ struct xenbus_device *dev =
+ container_of(watch, struct xenbus_device, otherend_watch);
+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+ enum xenbus_state state;
+
+ /* Protect us against watches firing on old details when the otherend
+ details change, say immediately after a resume. */
+ if (!dev->otherend ||
+ strncmp(dev->otherend, vec[XS_WATCH_PATH],
+ strlen(dev->otherend))) {
+ dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]);
+ return;
+ }
+
+ state = xenbus_read_driver_state(dev->otherend);
+
+ dev_dbg(&dev->dev, "state is %d, (%s), %s, %s",
+ state, xenbus_strstate(state), dev->otherend_watch.node,
+ vec[XS_WATCH_PATH]);
+
+ /*
+	 * Ignore xenbus transitions during shutdown. This prevents us from
+	 * doing work that can fail, e.g. when the rootfs is gone.
+ */
+ if (system_state > SYSTEM_RUNNING) {
+		struct xen_bus_type *bus =
+			container_of(dev->dev.bus, struct xen_bus_type, bus);
+ /* If we're frontend, drive the state machine to Closed. */
+ /* This should cause the backend to release our resources. */
+ if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+ xenbus_frontend_closed(dev);
+ return;
+ }
+
+ if (drv->otherend_changed)
+ drv->otherend_changed(dev, state);
+}
+
+
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+ struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+ free_otherend_watch(dev);
+ free_otherend_details(dev);
+
+ return drv->read_otherend_details(dev);
+}
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+ return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
+ "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_dev_probe(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
+ const struct xenbus_device_id *id;
+ int err;
+
+ DPRINTK("%s", dev->nodename);
+
+ if (!drv->probe) {
+ err = -ENODEV;
+ goto fail;
+ }
+
+ id = match_device(drv->ids, dev);
+ if (!id) {
+ err = -ENODEV;
+ goto fail;
+ }
+
+ err = talk_to_otherend(dev);
+ if (err) {
+ dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
+ dev->nodename);
+ return err;
+ }
+
+ err = drv->probe(dev, id);
+ if (err)
+ goto fail;
+
+ err = watch_otherend(dev);
+ if (err) {
+ dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
+ dev->nodename);
+ return err;
+ }
+
+ return 0;
+fail:
+ xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
+ xenbus_switch_state(dev, XenbusStateClosed);
+ return -ENODEV;
+}
+
+int xenbus_dev_remove(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
+
+ DPRINTK("%s", dev->nodename);
+
+ free_otherend_watch(dev);
+ free_otherend_details(dev);
+
+ if (drv->remove)
+ drv->remove(dev);
+
+ xenbus_switch_state(dev, XenbusStateClosed);
+ return 0;
+}
+
+static void xenbus_dev_shutdown(struct device *_dev)
+{
+ struct xenbus_device *dev = to_xenbus_device(_dev);
+ unsigned long timeout = 5*HZ;
+
+ DPRINTK("%s", dev->nodename);
+
+ get_device(&dev->dev);
+ if (dev->state != XenbusStateConnected) {
+ printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
+ dev->nodename, xenbus_strstate(dev->state));
+ goto out;
+ }
+ xenbus_switch_state(dev, XenbusStateClosing);
+ timeout = wait_for_completion_timeout(&dev->down, timeout);
+ if (!timeout)
+ printk(KERN_INFO "%s: %s timeout closing device\n",
+ __func__, dev->nodename);
+ out:
+ put_device(&dev->dev);
+}
+
+int xenbus_register_driver_common(struct xenbus_driver *drv,
+ struct xen_bus_type *bus,
+ struct module *owner,
+ const char *mod_name)
+{
+ drv->driver.name = drv->name;
+ drv->driver.bus = &bus->bus;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+
+ return driver_register(&drv->driver);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner, const char *mod_name)
+{
+ int ret;
+
+ drv->read_otherend_details = read_backend_details;
+
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+ owner, mod_name);
+ if (ret)
+ return ret;
+
+ /* If this driver is loaded as a module wait for devices to attach. */
+ wait_for_devices(drv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
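+
+/*
+ * Illustrative sketch of a frontend driver registration (the driver name, id
+ * table and callbacks are hypothetical; real drivers normally go through the
+ * xenbus_register_frontend() wrapper in xen/xenbus.h, which supplies
+ * THIS_MODULE and the module name):
+ *
+ *	static struct xenbus_device_id mydev_ids[] = {
+ *		{ "mydev" },
+ *		{ "" }
+ *	};
+ *
+ *	static struct xenbus_driver mydev_driver = {
+ *		.name = "mydev",
+ *		.ids = mydev_ids,
+ *		.probe = mydev_probe,
+ *		.otherend_changed = mydev_backend_changed,
+ *	};
+ *
+ *	return xenbus_register_frontend(&mydev_driver);
+ */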
+
+void xenbus_unregister_driver(struct xenbus_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
+
+struct xb_find_info
+{
+ struct xenbus_device *dev;
+ const char *nodename;
+};
+
+static int cmp_dev(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct xb_find_info *info = data;
+
+ if (!strcmp(xendev->nodename, info->nodename)) {
+ info->dev = xendev;
+ get_device(dev);
+ return 1;
+ }
+ return 0;
+}
+
+struct xenbus_device *xenbus_device_find(const char *nodename,
+ struct bus_type *bus)
+{
+ struct xb_find_info info = { .dev = NULL, .nodename = nodename };
+
+ bus_for_each_dev(bus, NULL, &info, cmp_dev);
+ return info.dev;
+}
+
+static int cleanup_dev(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct xb_find_info *info = data;
+ int len = strlen(info->nodename);
+
+ DPRINTK("%s", info->nodename);
+
+ /* Match the info->nodename path, or any subdirectory of that path. */
+ if (strncmp(xendev->nodename, info->nodename, len))
+ return 0;
+
+ /* If the node name is longer, ensure it really is a subdirectory. */
+ if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
+ return 0;
+
+ info->dev = xendev;
+ get_device(dev);
+ return 1;
+}
+
+static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
+{
+ struct xb_find_info info = { .nodename = path };
+
+ do {
+ info.dev = NULL;
+ bus_for_each_dev(bus, NULL, &info, cleanup_dev);
+ if (info.dev) {
+ device_unregister(&info.dev->dev);
+ put_device(&info.dev->dev);
+ }
+ } while (info.dev);
+}
+
+static void xenbus_dev_release(struct device *dev)
+{
+ if (dev)
+ kfree(to_xenbus_device(dev));
+}
+
+static ssize_t xendev_show_nodename(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
+}
+DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
+
+static ssize_t xendev_show_devtype(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
+}
+DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
+
+
+int xenbus_probe_node(struct xen_bus_type *bus,
+ const char *type,
+ const char *nodename)
+{
+ int err;
+ struct xenbus_device *xendev;
+ size_t stringlen;
+ char *tmpstring;
+
+ enum xenbus_state state = xenbus_read_driver_state(nodename);
+
+ if (state != XenbusStateInitialising) {
+ /* Device is not new, so ignore it. This can happen if a
+ device is going away after switching to Closed. */
+ return 0;
+ }
+
+ stringlen = strlen(nodename) + 1 + strlen(type) + 1;
+ xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
+ if (!xendev)
+ return -ENOMEM;
+
+ xendev->state = XenbusStateInitialising;
+
+ /* Copy the strings into the extra space. */
+
+ tmpstring = (char *)(xendev + 1);
+ strcpy(tmpstring, nodename);
+ xendev->nodename = tmpstring;
+
+ tmpstring += strlen(tmpstring) + 1;
+ strcpy(tmpstring, type);
+ xendev->devicetype = tmpstring;
+ init_completion(&xendev->down);
+
+ xendev->dev.bus = &bus->bus;
+ xendev->dev.release = xenbus_dev_release;
+
+ err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
+ if (err)
+ goto fail;
+
+ /* Register with generic device framework. */
+ err = device_register(&xendev->dev);
+ if (err)
+ goto fail;
+
+ err = device_create_file(&xendev->dev, &dev_attr_nodename);
+ if (err)
+ goto fail_unregister;
+
+ err = device_create_file(&xendev->dev, &dev_attr_devtype);
+ if (err)
+ goto fail_remove_file;
+
+ return 0;
+fail_remove_file:
+ device_remove_file(&xendev->dev, &dev_attr_nodename);
+fail_unregister:
+ device_unregister(&xendev->dev);
+fail:
+ kfree(xendev);
+ return err;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(const char *type, const char *name)
+{
+ char *nodename;
+ int err;
+
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
+ xenbus_frontend.root, type, name);
+ if (!nodename)
+ return -ENOMEM;
+
+ DPRINTK("%s", nodename);
+
+ err = xenbus_probe_node(&xenbus_frontend, type, nodename);
+ kfree(nodename);
+ return err;
+}
+
+static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
+{
+ int err = 0;
+ char **dir;
+ unsigned int dir_n = 0;
+ int i;
+
+ dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ for (i = 0; i < dir_n; i++) {
+ err = bus->probe(type, dir[i]);
+ if (err)
+ break;
+ }
+ kfree(dir);
+ return err;
+}
+
+int xenbus_probe_devices(struct xen_bus_type *bus)
+{
+ int err = 0;
+ char **dir;
+ unsigned int i, dir_n;
+
+ dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ for (i = 0; i < dir_n; i++) {
+ err = xenbus_probe_device_type(bus, dir[i]);
+ if (err)
+ break;
+ }
+ kfree(dir);
+ return err;
+}
+
+static unsigned int char_count(const char *str, char c)
+{
+ unsigned int i, ret = 0;
+
+ for (i = 0; str[i]; i++)
+ if (str[i] == c)
+ ret++;
+ return ret;
+}
+
+static int strsep_len(const char *str, char c, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; str[i]; i++)
+ if (str[i] == c) {
+ if (len == 0)
+ return i;
+ len--;
+ }
+ return (len == 0) ? i : -ERANGE;
+}
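+
+/*
+ * Worked example (illustrative): strsep_len("device/vif/0/state", '/', 2)
+ * returns 12, the offset of the third '/', which lets xenbus_dev_changed()
+ * below truncate the changed node down to its device root "device/vif/0".
+ */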
+
+void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
+{
+ int exists, rootlen;
+ struct xenbus_device *dev;
+ char type[BUS_ID_SIZE];
+ const char *p, *root;
+
+ if (char_count(node, '/') < 2)
+ return;
+
+ exists = xenbus_exists(XBT_NIL, node, "");
+ if (!exists) {
+ xenbus_cleanup_devices(node, &bus->bus);
+ return;
+ }
+
+ /* backend/<type>/... or device/<type>/... */
+ p = strchr(node, '/') + 1;
+ snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
+ type[BUS_ID_SIZE-1] = '\0';
+
+ rootlen = strsep_len(node, '/', bus->levels);
+ if (rootlen < 0)
+ return;
+ root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
+ if (!root)
+ return;
+
+ dev = xenbus_device_find(root, &bus->bus);
+ if (!dev)
+ xenbus_probe_node(bus, type, root);
+ else
+ put_device(&dev->dev);
+
+ kfree(root);
+}
+
+static void frontend_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ DPRINTK("");
+
+ xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+ .node = "device",
+ .callback = frontend_changed,
+};
+
+static int suspend_dev(struct device *dev, void *data)
+{
+ int err = 0;
+ struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ DPRINTK("");
+
+ if (dev->driver == NULL)
+ return 0;
+ drv = to_xenbus_driver(dev->driver);
+ xdev = container_of(dev, struct xenbus_device, dev);
+ if (drv->suspend)
+ err = drv->suspend(xdev);
+ if (err)
+ printk(KERN_WARNING
+ "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
+ return 0;
+}
+
+static int suspend_cancel_dev(struct device *dev, void *data)
+{
+ int err = 0;
+ struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ DPRINTK("");
+
+ if (dev->driver == NULL)
+ return 0;
+ drv = to_xenbus_driver(dev->driver);
+ xdev = container_of(dev, struct xenbus_device, dev);
+ if (drv->suspend_cancel)
+ err = drv->suspend_cancel(xdev);
+ if (err)
+ printk(KERN_WARNING
+ "xenbus: suspend_cancel %s failed: %i\n",
+ dev->bus_id, err);
+ return 0;
+}
+
+static int resume_dev(struct device *dev, void *data)
+{
+ int err;
+ struct xenbus_driver *drv;
+ struct xenbus_device *xdev;
+
+ DPRINTK("");
+
+ if (dev->driver == NULL)
+ return 0;
+
+ drv = to_xenbus_driver(dev->driver);
+ xdev = container_of(dev, struct xenbus_device, dev);
+
+ err = talk_to_otherend(xdev);
+ if (err) {
+ printk(KERN_WARNING
+ "xenbus: resume (talk_to_otherend) %s failed: %i\n",
+ dev->bus_id, err);
+ return err;
+ }
+
+ xdev->state = XenbusStateInitialising;
+
+ if (drv->resume) {
+ err = drv->resume(xdev);
+ if (err) {
+ printk(KERN_WARNING
+ "xenbus: resume %s failed: %i\n",
+ dev->bus_id, err);
+ return err;
+ }
+ }
+
+ err = watch_otherend(xdev);
+ if (err) {
+ printk(KERN_WARNING
+ "xenbus_probe: resume (watch_otherend) %s failed: "
+ "%d.\n", dev->bus_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+void xenbus_suspend(void)
+{
+ DPRINTK("");
+
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
+ xenbus_backend_suspend(suspend_dev);
+ xs_suspend();
+}
+EXPORT_SYMBOL_GPL(xenbus_suspend);
+
+void xenbus_resume(void)
+{
+ xb_init_comms();
+ xs_resume();
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
+ xenbus_backend_resume(resume_dev);
+}
+EXPORT_SYMBOL_GPL(xenbus_resume);
+
+void xenbus_suspend_cancel(void)
+{
+ xs_suspend_cancel();
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
+ xenbus_backend_resume(suspend_cancel_dev);
+}
+EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
+
+/* A flag to determine if xenstored is 'ready' (i.e. has started) */
+int xenstored_ready = 0;
+
+
+int register_xenstore_notifier(struct notifier_block *nb)
+{
+ int ret = 0;
+
+ if (xenstored_ready > 0)
+ ret = nb->notifier_call(nb, 0, NULL);
+ else
+ blocking_notifier_chain_register(&xenstore_chain, nb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_xenstore_notifier);
+
+void unregister_xenstore_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&xenstore_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
+
+void xenbus_probe(struct work_struct *unused)
+{
+	BUG_ON(xenstored_ready <= 0);
+
+ /* Enumerate devices in xenstore and watch for changes. */
+ xenbus_probe_devices(&xenbus_frontend);
+ register_xenbus_watch(&fe_watch);
+ xenbus_backend_probe_and_watch();
+
+ /* Notify others that xenstore is up */
+ blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
+}
+
+static int __init xenbus_probe_init(void)
+{
+ int err = 0;
+
+ DPRINTK("");
+
+ err = -ENODEV;
+ if (!is_running_on_xen())
+ goto out_error;
+
+ /* Register ourselves with the kernel bus subsystem */
+ err = bus_register(&xenbus_frontend.bus);
+ if (err)
+ goto out_error;
+
+ err = xenbus_backend_bus_register();
+ if (err)
+ goto out_unreg_front;
+
+ /*
+ * Domain0 doesn't have a store_evtchn or store_mfn yet.
+ */
+ if (is_initial_xendomain()) {
+ /* dom0 not yet supported */
+ } else {
+ xenstored_ready = 1;
+ xen_store_evtchn = xen_start_info->store_evtchn;
+ xen_store_mfn = xen_start_info->store_mfn;
+ }
+ xen_store_interface = mfn_to_virt(xen_store_mfn);
+
+ /* Initialize the interface to xenstore. */
+ err = xs_init();
+ if (err) {
+ printk(KERN_WARNING
+ "XENBUS: Error initializing xenstore comms: %i\n", err);
+ goto out_unreg_back;
+ }
+
+ if (!is_initial_xendomain())
+ xenbus_probe(NULL);
+
+ return 0;
+
+ out_unreg_back:
+ xenbus_backend_bus_unregister();
+
+ out_unreg_front:
+ bus_unregister(&xenbus_frontend.bus);
+
+ out_error:
+ return err;
+}
+
+postcore_initcall(xenbus_probe_init);
+
+MODULE_LICENSE("GPL");
+
+static int is_disconnected_device(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+
+ /*
+ * A device with no driver will never connect. We care only about
+ * devices which should currently be in the process of connecting.
+ */
+ if (!dev->driver)
+ return 0;
+
+ /* Is this search limited to a particular driver? */
+ if (drv && (dev->driver != drv))
+ return 0;
+
+ return (xendev->state != XenbusStateConnected);
+}
+
+static int exists_disconnected_device(struct device_driver *drv)
+{
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ is_disconnected_device);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+
+ /* Is this operation limited to a particular driver? */
+ if (drv && (dev->driver != drv))
+ return 0;
+
+ if (!dev->driver) {
+ /* Information only: is this too noisy? */
+ printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+ xendev->nodename);
+ } else if (xendev->state != XenbusStateConnected) {
+ printk(KERN_WARNING "XENBUS: Timeout connecting "
+ "to device: %s (state %d)\n",
+ xendev->nodename, xendev->state);
+ }
+
+ return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * Wait, with a ten-second timeout, for all currently configured devices. We
+ * need to do this to guarantee that the filesystems and/or network devices
+ * needed for boot are available before we allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+ unsigned long timeout = jiffies + 10*HZ;
+ struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+
+ if (!ready_to_wait_for_devices || !is_running_on_xen())
+ return;
+
+ while (exists_disconnected_device(drv)) {
+ if (time_after(jiffies, timeout))
+ break;
+ schedule_timeout_interruptible(HZ/10);
+ }
+
+ bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ print_device_status);
+}
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+ ready_to_wait_for_devices = 1;
+ wait_for_devices(NULL);
+ return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
new file mode 100644
index 00000000000..e09b19415a4
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ * xenbus_probe.h
+ *
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 XenSource Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XENBUS_PROBE_H
+#define _XENBUS_PROBE_H
+
+#ifdef CONFIG_XEN_BACKEND
+extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
+extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
+extern void xenbus_backend_probe_and_watch(void);
+extern int xenbus_backend_bus_register(void);
+extern void xenbus_backend_bus_unregister(void);
+#else
+static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
+static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
+static inline void xenbus_backend_probe_and_watch(void) {}
+static inline int xenbus_backend_bus_register(void) { return 0; }
+static inline void xenbus_backend_bus_unregister(void) {}
+#endif
+
+struct xen_bus_type
+{
+ char *root;
+ unsigned int levels;
+ int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
+ int (*probe)(const char *type, const char *dir);
+ struct bus_type bus;
+};
+
+extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
+extern int xenbus_dev_probe(struct device *_dev);
+extern int xenbus_dev_remove(struct device *_dev);
+extern int xenbus_register_driver_common(struct xenbus_driver *drv,
+ struct xen_bus_type *bus,
+ struct module *owner,
+ const char *mod_name);
+extern int xenbus_probe_node(struct xen_bus_type *bus,
+ const char *type,
+ const char *nodename);
+extern int xenbus_probe_devices(struct xen_bus_type *bus);
+
+extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
+
+#endif
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
new file mode 100644
index 00000000000..9e943fbce81
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -0,0 +1,861 @@
+/******************************************************************************
+ * xenbus_xs.c
+ *
+ * This is the kernel equivalent of the "xs" library. We don't need everything
+ * and we use xenbus_comms for communication.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/unistd.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/fcntl.h>
+#include <linux/kthread.h>
+#include <linux/rwsem.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <xen/xenbus.h>
+#include "xenbus_comms.h"
+
+struct xs_stored_msg {
+ struct list_head list;
+
+ struct xsd_sockmsg hdr;
+
+ union {
+ /* Queued replies. */
+ struct {
+ char *body;
+ } reply;
+
+ /* Queued watch events. */
+ struct {
+ struct xenbus_watch *handle;
+ char **vec;
+ unsigned int vec_size;
+ } watch;
+ } u;
+};
+
+struct xs_handle {
+ /* A list of replies. Currently only one will ever be outstanding. */
+ struct list_head reply_list;
+ spinlock_t reply_lock;
+ wait_queue_head_t reply_waitq;
+
+ /*
+ * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
+ * response_mutex is never taken simultaneously with the other three.
+ */
+
+ /* One request at a time. */
+ struct mutex request_mutex;
+
+ /* Protect xenbus reader thread against save/restore. */
+ struct mutex response_mutex;
+
+ /* Protect transactions against save/restore. */
+ struct rw_semaphore transaction_mutex;
+
+ /* Protect watch (de)register against save/restore. */
+ struct rw_semaphore watch_mutex;
+};
+
+static struct xs_handle xs_state;
+
+/* List of registered watches, and a lock to protect it. */
+static LIST_HEAD(watches);
+static DEFINE_SPINLOCK(watches_lock);
+
+/* List of pending watch callback events, and a lock to protect it. */
+static LIST_HEAD(watch_events);
+static DEFINE_SPINLOCK(watch_events_lock);
+
+/*
+ * Details of the xenwatch callback kernel thread. The thread waits on the
+ * watch_events_waitq for work to do (queued on watch_events list). When it
+ * wakes up it acquires the xenwatch_mutex before reading the list and
+ * carrying out work.
+ */
+static pid_t xenwatch_pid;
+static DEFINE_MUTEX(xenwatch_mutex);
+static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
+
+static int get_error(const char *errorstring)
+{
+ unsigned int i;
+
+ for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
+ if (i == ARRAY_SIZE(xsd_errors) - 1) {
+ printk(KERN_WARNING
+ "XENBUS xen store gave: unknown error %s",
+ errorstring);
+ return EINVAL;
+ }
+ }
+ return xsd_errors[i].errnum;
+}
+
+static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
+{
+ struct xs_stored_msg *msg;
+ char *body;
+
+ spin_lock(&xs_state.reply_lock);
+
+ while (list_empty(&xs_state.reply_list)) {
+ spin_unlock(&xs_state.reply_lock);
+ /* XXX FIXME: Avoid synchronous wait for response here. */
+ wait_event(xs_state.reply_waitq,
+ !list_empty(&xs_state.reply_list));
+ spin_lock(&xs_state.reply_lock);
+ }
+
+ msg = list_entry(xs_state.reply_list.next,
+ struct xs_stored_msg, list);
+ list_del(&msg->list);
+
+ spin_unlock(&xs_state.reply_lock);
+
+ *type = msg->hdr.type;
+ if (len)
+ *len = msg->hdr.len;
+ body = msg->u.reply.body;
+
+ kfree(msg);
+
+ return body;
+}
+
+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
+{
+ void *ret;
+ struct xsd_sockmsg req_msg = *msg;
+ int err;
+
+ if (req_msg.type == XS_TRANSACTION_START)
+ down_read(&xs_state.transaction_mutex);
+
+ mutex_lock(&xs_state.request_mutex);
+
+ err = xb_write(msg, sizeof(*msg) + msg->len);
+ if (err) {
+ msg->type = XS_ERROR;
+ ret = ERR_PTR(err);
+ } else
+ ret = read_reply(&msg->type, &msg->len);
+
+ mutex_unlock(&xs_state.request_mutex);
+
+ if ((msg->type == XS_TRANSACTION_END) ||
+ ((req_msg.type == XS_TRANSACTION_START) &&
+ (msg->type == XS_ERROR)))
+ up_read(&xs_state.transaction_mutex);
+
+ return ret;
+}
+
+/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
+static void *xs_talkv(struct xenbus_transaction t,
+ enum xsd_sockmsg_type type,
+ const struct kvec *iovec,
+ unsigned int num_vecs,
+ unsigned int *len)
+{
+ struct xsd_sockmsg msg;
+ void *ret = NULL;
+ unsigned int i;
+ int err;
+
+ msg.tx_id = t.id;
+ msg.req_id = 0;
+ msg.type = type;
+ msg.len = 0;
+ for (i = 0; i < num_vecs; i++)
+ msg.len += iovec[i].iov_len;
+
+ mutex_lock(&xs_state.request_mutex);
+
+ err = xb_write(&msg, sizeof(msg));
+ if (err) {
+ mutex_unlock(&xs_state.request_mutex);
+ return ERR_PTR(err);
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
+ if (err) {
+ mutex_unlock(&xs_state.request_mutex);
+ return ERR_PTR(err);
+ }
+ }
+
+ ret = read_reply(&msg.type, len);
+
+ mutex_unlock(&xs_state.request_mutex);
+
+ if (IS_ERR(ret))
+ return ret;
+
+ if (msg.type == XS_ERROR) {
+ err = get_error(ret);
+ kfree(ret);
+ return ERR_PTR(-err);
+ }
+
+ if (msg.type != type) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "XENBUS unexpected type [%d], expected [%d]\n",
+ msg.type, type);
+ kfree(ret);
+ return ERR_PTR(-EINVAL);
+ }
+ return ret;
+}
+
+/* Simplified version of xs_talkv: single message. */
+static void *xs_single(struct xenbus_transaction t,
+ enum xsd_sockmsg_type type,
+ const char *string,
+ unsigned int *len)
+{
+ struct kvec iovec;
+
+ iovec.iov_base = (void *)string;
+ iovec.iov_len = strlen(string) + 1;
+ return xs_talkv(t, type, &iovec, 1, len);
+}
+
+/* Many commands only need an ack, don't care what it says. */
+static int xs_error(char *reply)
+{
+ if (IS_ERR(reply))
+ return PTR_ERR(reply);
+ kfree(reply);
+ return 0;
+}
+
+static unsigned int count_strings(const char *strings, unsigned int len)
+{
+ unsigned int num;
+ const char *p;
+
+ for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
+ num++;
+
+ return num;
+}
+
+/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
+static char *join(const char *dir, const char *name)
+{
+ char *buffer;
+
+ if (strlen(name) == 0)
+ buffer = kasprintf(GFP_KERNEL, "%s", dir);
+ else
+ buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+ return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
+}
+
+static char **split(char *strings, unsigned int len, unsigned int *num)
+{
+ char *p, **ret;
+
+ /* Count the strings. */
+ *num = count_strings(strings, len);
+
+ /* Transfer to one big alloc for easy freeing. */
+ ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
+ if (!ret) {
+ kfree(strings);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(&ret[*num], strings, len);
+ kfree(strings);
+
+ strings = (char *)&ret[*num];
+ for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
+ ret[(*num)++] = p;
+
+ return ret;
+}
+
+char **xenbus_directory(struct xenbus_transaction t,
+ const char *dir, const char *node, unsigned int *num)
+{
+ char *strings, *path;
+ unsigned int len;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return (char **)path;
+
+ strings = xs_single(t, XS_DIRECTORY, path, &len);
+ kfree(path);
+ if (IS_ERR(strings))
+ return (char **)strings;
+
+ return split(strings, len, num);
+}
+EXPORT_SYMBOL_GPL(xenbus_directory);
+
+/* Check if a path exists. Return 1 if it does. */
+int xenbus_exists(struct xenbus_transaction t,
+ const char *dir, const char *node)
+{
+ char **d;
+ int dir_n;
+
+ d = xenbus_directory(t, dir, node, &dir_n);
+ if (IS_ERR(d))
+ return 0;
+ kfree(d);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(xenbus_exists);
+
+/* Get the value of a single file.
+ * Returns a kmalloc()'ed value: call kfree() on it after use.
+ * len indicates length in bytes.
+ */
+void *xenbus_read(struct xenbus_transaction t,
+ const char *dir, const char *node, unsigned int *len)
+{
+ char *path;
+ void *ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return (void *)path;
+
+ ret = xs_single(t, XS_READ, path, len);
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_read);
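+
+/*
+ * Illustrative use (hypothetical path): the returned buffer is
+ * kmalloc()'ed and must be freed by the caller.
+ *
+ *   char *be = xenbus_read(XBT_NIL, "device/vif/0", "backend", NULL);
+ *   if (!IS_ERR(be)) {
+ *           ... use the value ...
+ *           kfree(be);
+ *   }
+ */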
+
+/* Write the value of a single file.
+ * Returns -err on failure.
+ */
+int xenbus_write(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *string)
+{
+ const char *path;
+ struct kvec iovec[2];
+ int ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ iovec[0].iov_base = (void *)path;
+ iovec[0].iov_len = strlen(path) + 1;
+ iovec[1].iov_base = (void *)string;
+ iovec[1].iov_len = strlen(string);
+
+ ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_write);
+
+/* Create a new directory. */
+int xenbus_mkdir(struct xenbus_transaction t,
+ const char *dir, const char *node)
+{
+ char *path;
+ int ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_mkdir);
+
+/* Destroy a file or directory (directories must be empty). */
+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
+{
+ char *path;
+ int ret;
+
+ path = join(dir, node);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ ret = xs_error(xs_single(t, XS_RM, path, NULL));
+ kfree(path);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_rm);
+
+/* Start a transaction: changes by others will not be seen during this
+ * transaction, and our changes will not be visible to others until the
+ * transaction ends.
+ */
+int xenbus_transaction_start(struct xenbus_transaction *t)
+{
+ char *id_str;
+
+ down_read(&xs_state.transaction_mutex);
+
+ id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
+ if (IS_ERR(id_str)) {
+ up_read(&xs_state.transaction_mutex);
+ return PTR_ERR(id_str);
+ }
+
+ t->id = simple_strtoul(id_str, NULL, 0);
+ kfree(id_str);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_transaction_start);
+
+/* End a transaction.
+ * If abort is true, the transaction is discarded instead of committed.
+ */
+int xenbus_transaction_end(struct xenbus_transaction t, int abort)
+{
+ char abortstr[2];
+ int err;
+
+ if (abort)
+ strcpy(abortstr, "F");
+ else
+ strcpy(abortstr, "T");
+
+ err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
+
+ up_read(&xs_state.transaction_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xenbus_transaction_end);
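+
+/*
+ * Sketch of the usual pattern (hypothetical path and value): group
+ * accesses in one transaction and retry on -EAGAIN, which is returned
+ * when the transaction raced with a conflicting update.
+ *
+ *   struct xenbus_transaction xbt;
+ *   int err;
+ * again:
+ *   err = xenbus_transaction_start(&xbt);
+ *   if (err)
+ *           return err;
+ *   err = xenbus_printf(xbt, "device/vbd/0", "state", "%d", 1);
+ *   err = xenbus_transaction_end(xbt, err != 0);
+ *   if (err == -EAGAIN)
+ *           goto again;
+ */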
+
+/* Single read and scanf: returns -errno or num scanned. */
+int xenbus_scanf(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ char *val;
+
+ val = xenbus_read(t, dir, node, NULL);
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ va_start(ap, fmt);
+ ret = vsscanf(val, fmt, ap);
+ va_end(ap);
+ kfree(val);
+ /* Distinctive errno. */
+ if (ret == 0)
+ return -ERANGE;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_scanf);
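+
+/*
+ * Illustrative read of a single integer node (hypothetical path):
+ *
+ *   int state;
+ *   err = xenbus_scanf(XBT_NIL, "device/vif/0", "state", "%d", &state);
+ *   if (err < 0)
+ *           ... handle error ...
+ */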
+
+/* Single printf and write: returns -errno or 0. */
+int xenbus_printf(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+#define PRINTF_BUFFER_SIZE 4096
+ char *printf_buffer;
+
+ printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
+ if (printf_buffer == NULL)
+ return -ENOMEM;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+ va_end(ap);
+
+ BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
+ ret = xenbus_write(t, dir, node, printf_buffer);
+
+ kfree(printf_buffer);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_printf);
+
+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
+{
+ va_list ap;
+ const char *name;
+ int ret = 0;
+
+ va_start(ap, dir);
+ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
+ const char *fmt = va_arg(ap, char *);
+ void *result = va_arg(ap, void *);
+ char *p;
+
+ p = xenbus_read(t, dir, name, NULL);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ break;
+ }
+ if (fmt) {
+ if (sscanf(p, fmt, result) == 0)
+ ret = -EINVAL;
+ kfree(p);
+ } else
+ *(char **)result = p;
+ }
+ va_end(ap);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xenbus_gather);
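+
+/*
+ * Illustrative call (hypothetical node names): fetch several values in
+ * one go.  The argument list is NULL terminated, and a NULL format
+ * string returns the raw kmalloc()'ed string instead of scanning it.
+ *
+ *   err = xenbus_gather(XBT_NIL, dir,
+ *                       "ring-ref", "%lu", &ring_ref,
+ *                       "event-channel", "%u", &evtchn,
+ *                       NULL);
+ */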
+
+static int xs_watch(const char *path, const char *token)
+{
+ struct kvec iov[2];
+
+ iov[0].iov_base = (void *)path;
+ iov[0].iov_len = strlen(path) + 1;
+ iov[1].iov_base = (void *)token;
+ iov[1].iov_len = strlen(token) + 1;
+
+ return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
+ ARRAY_SIZE(iov), NULL));
+}
+
+static int xs_unwatch(const char *path, const char *token)
+{
+ struct kvec iov[2];
+
+ iov[0].iov_base = (char *)path;
+ iov[0].iov_len = strlen(path) + 1;
+ iov[1].iov_base = (char *)token;
+ iov[1].iov_len = strlen(token) + 1;
+
+ return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
+ ARRAY_SIZE(iov), NULL));
+}
+
+static struct xenbus_watch *find_watch(const char *token)
+{
+ struct xenbus_watch *i, *cmp;
+
+ cmp = (void *)simple_strtoul(token, NULL, 16);
+
+ list_for_each_entry(i, &watches, list)
+ if (i == cmp)
+ return i;
+
+ return NULL;
+}
+
+/* Register callback to watch this node. */
+int register_xenbus_watch(struct xenbus_watch *watch)
+{
+ /* The watch pointer, printed as hex ASCII, is used as the token. */
+ char token[sizeof(watch) * 2 + 1];
+ int err;
+
+ sprintf(token, "%lX", (long)watch);
+
+ down_read(&xs_state.watch_mutex);
+
+ spin_lock(&watches_lock);
+ BUG_ON(find_watch(token));
+ list_add(&watch->list, &watches);
+ spin_unlock(&watches_lock);
+
+ err = xs_watch(watch->node, token);
+
+ /* Ignore errors due to multiple registration. */
+ if ((err != 0) && (err != -EEXIST)) {
+ spin_lock(&watches_lock);
+ list_del(&watch->list);
+ spin_unlock(&watches_lock);
+ }
+
+ up_read(&xs_state.watch_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(register_xenbus_watch);
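+
+/*
+ * Illustrative registration (hypothetical node and callback): the caller
+ * provides a long-lived struct xenbus_watch with .node and .callback set;
+ * the callback is later invoked from the xenwatch kernel thread.
+ *
+ *   static struct xenbus_watch my_watch = {
+ *           .node = "device/vif/0/state",
+ *           .callback = my_state_changed,
+ *   };
+ *   err = register_xenbus_watch(&my_watch);
+ */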
+
+void unregister_xenbus_watch(struct xenbus_watch *watch)
+{
+ struct xs_stored_msg *msg, *tmp;
+ char token[sizeof(watch) * 2 + 1];
+ int err;
+
+ sprintf(token, "%lX", (long)watch);
+
+ down_read(&xs_state.watch_mutex);
+
+ spin_lock(&watches_lock);
+ BUG_ON(!find_watch(token));
+ list_del(&watch->list);
+ spin_unlock(&watches_lock);
+
+ err = xs_unwatch(watch->node, token);
+ if (err)
+ printk(KERN_WARNING
+ "XENBUS Failed to release watch %s: %i\n",
+ watch->node, err);
+
+ up_read(&xs_state.watch_mutex);
+
+ /* Make sure there are no callbacks currently running
+ (unless it's us). */
+ if (current->pid != xenwatch_pid)
+ mutex_lock(&xenwatch_mutex);
+
+ /* Cancel pending watch events. */
+ spin_lock(&watch_events_lock);
+ list_for_each_entry_safe(msg, tmp, &watch_events, list) {
+ if (msg->u.watch.handle != watch)
+ continue;
+ list_del(&msg->list);
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+ }
+ spin_unlock(&watch_events_lock);
+
+ if (current->pid != xenwatch_pid)
+ mutex_unlock(&xenwatch_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
+
+void xs_suspend(void)
+{
+ down_write(&xs_state.transaction_mutex);
+ down_write(&xs_state.watch_mutex);
+ mutex_lock(&xs_state.request_mutex);
+ mutex_lock(&xs_state.response_mutex);
+}
+
+void xs_resume(void)
+{
+ struct xenbus_watch *watch;
+ char token[sizeof(watch) * 2 + 1];
+
+ mutex_unlock(&xs_state.response_mutex);
+ mutex_unlock(&xs_state.request_mutex);
+ up_write(&xs_state.transaction_mutex);
+
+ /* No need for watches_lock: the watch_mutex is sufficient. */
+ list_for_each_entry(watch, &watches, list) {
+ sprintf(token, "%lX", (long)watch);
+ xs_watch(watch->node, token);
+ }
+
+ up_write(&xs_state.watch_mutex);
+}
+
+void xs_suspend_cancel(void)
+{
+ mutex_unlock(&xs_state.response_mutex);
+ mutex_unlock(&xs_state.request_mutex);
+ up_write(&xs_state.watch_mutex);
+ up_write(&xs_state.transaction_mutex);
+}
+
+static int xenwatch_thread(void *unused)
+{
+ struct list_head *ent;
+ struct xs_stored_msg *msg;
+
+ for (;;) {
+ wait_event_interruptible(watch_events_waitq,
+ !list_empty(&watch_events));
+
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&xenwatch_mutex);
+
+ spin_lock(&watch_events_lock);
+ ent = watch_events.next;
+ if (ent != &watch_events)
+ list_del(ent);
+ spin_unlock(&watch_events_lock);
+
+ if (ent != &watch_events) {
+ msg = list_entry(ent, struct xs_stored_msg, list);
+ msg->u.watch.handle->callback(
+ msg->u.watch.handle,
+ (const char **)msg->u.watch.vec,
+ msg->u.watch.vec_size);
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+ }
+
+ mutex_unlock(&xenwatch_mutex);
+ }
+
+ return 0;
+}
+
+static int process_msg(void)
+{
+ struct xs_stored_msg *msg;
+ char *body;
+ int err;
+
+ /*
+ * We must disallow save/restore while reading a xenstore message.
+ * A partial read across s/r leaves us out of sync with xenstored.
+ */
+ for (;;) {
+ err = xb_wait_for_data_to_read();
+ if (err)
+ return err;
+ mutex_lock(&xs_state.response_mutex);
+ if (xb_data_to_read())
+ break;
+ /* We raced with save/restore: pending data 'disappeared'. */
+ mutex_unlock(&xs_state.response_mutex);
+ }
+
+
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (msg == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xb_read(&msg->hdr, sizeof(msg->hdr));
+ if (err) {
+ kfree(msg);
+ goto out;
+ }
+
+ body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
+ if (body == NULL) {
+ kfree(msg);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xb_read(body, msg->hdr.len);
+ if (err) {
+ kfree(body);
+ kfree(msg);
+ goto out;
+ }
+ body[msg->hdr.len] = '\0';
+
+ if (msg->hdr.type == XS_WATCH_EVENT) {
+ msg->u.watch.vec = split(body, msg->hdr.len,
+ &msg->u.watch.vec_size);
+ if (IS_ERR(msg->u.watch.vec)) {
+ kfree(msg);
+ err = PTR_ERR(msg->u.watch.vec);
+ goto out;
+ }
+
+ spin_lock(&watches_lock);
+ msg->u.watch.handle = find_watch(
+ msg->u.watch.vec[XS_WATCH_TOKEN]);
+ if (msg->u.watch.handle != NULL) {
+ spin_lock(&watch_events_lock);
+ list_add_tail(&msg->list, &watch_events);
+ wake_up(&watch_events_waitq);
+ spin_unlock(&watch_events_lock);
+ } else {
+ kfree(msg->u.watch.vec);
+ kfree(msg);
+ }
+ spin_unlock(&watches_lock);
+ } else {
+ msg->u.reply.body = body;
+ spin_lock(&xs_state.reply_lock);
+ list_add_tail(&msg->list, &xs_state.reply_list);
+ spin_unlock(&xs_state.reply_lock);
+ wake_up(&xs_state.reply_waitq);
+ }
+
+ out:
+ mutex_unlock(&xs_state.response_mutex);
+ return err;
+}
+
+static int xenbus_thread(void *unused)
+{
+ int err;
+
+ for (;;) {
+ err = process_msg();
+ if (err)
+ printk(KERN_WARNING "XENBUS error %d while reading "
+ "message\n", err);
+ if (kthread_should_stop())
+ break;
+ }
+
+ return 0;
+}
+
+int xs_init(void)
+{
+ int err;
+ struct task_struct *task;
+
+ INIT_LIST_HEAD(&xs_state.reply_list);
+ spin_lock_init(&xs_state.reply_lock);
+ init_waitqueue_head(&xs_state.reply_waitq);
+
+ mutex_init(&xs_state.request_mutex);
+ mutex_init(&xs_state.response_mutex);
+ init_rwsem(&xs_state.transaction_mutex);
+ init_rwsem(&xs_state.watch_mutex);
+
+ /* Initialize the shared memory rings to talk to xenstored */
+ err = xb_init_comms();
+ if (err)
+ return err;
+
+ task = kthread_run(xenwatch_thread, NULL, "xenwatch");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ xenwatch_pid = task->pid;
+
+ task = kthread_run(xenbus_thread, NULL, "xenbus");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ return 0;
+}
diff --git a/fs/Kconfig b/fs/Kconfig
index 613df554728..6a649902c5a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -251,7 +251,7 @@ config JBD2
config JBD2_DEBUG
bool "JBD2 (ext4dev/ext4) debugging support"
- depends on JBD2
+ depends on JBD2 && DEBUG_FS
help
If you are using the ext4dev/ext4 journaled file system (or
potentially any other filesystem/device using JBD2), this option
@@ -260,10 +260,10 @@ config JBD2_DEBUG
By default, the debugging output will be turned off.
If you select Y here, then you will be able to turn on debugging
- with "echo N > /proc/sys/fs/jbd2-debug", where N is a number between
- 1 and 5. The higher the number, the more debugging output is
- generated. To turn debugging off again, do
- "echo 0 > /proc/sys/fs/jbd2-debug".
+ with "echo N > /sys/kernel/debug/jbd2/jbd2-debug", where N is a
+ number between 1 and 5. The higher the number, the more debugging
+ output is generated. To turn debugging off again, do
+ "echo 0 > /sys/kernel/debug/jbd2/jbd2-debug".
config FS_MBCACHE
# Meta block cache for Extended Attributes (ext2/ext3/ext4)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 9de54ae48de..e53b4af52f1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -517,7 +517,7 @@ do_more:
/*
* An HJ special. This is expensive...
*/
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
jbd_unlock_bh_state(bitmap_bh);
{
struct buffer_head *debug_bh;
@@ -1597,7 +1597,7 @@ allocated:
performed_allocation = 1;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
{
struct buffer_head *debug_bh;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index b9ce2412907..750c46f7d89 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -39,6 +39,7 @@
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/falloc.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
@@ -91,36 +92,6 @@ static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
-static int ext4_ext_check_header(const char *function, struct inode *inode,
- struct ext4_extent_header *eh)
-{
- const char *error_msg = NULL;
-
- if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
- error_msg = "invalid magic";
- goto corrupted;
- }
- if (unlikely(eh->eh_max == 0)) {
- error_msg = "invalid eh_max";
- goto corrupted;
- }
- if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
- error_msg = "invalid eh_entries";
- goto corrupted;
- }
- return 0;
-
-corrupted:
- ext4_error(inode->i_sb, function,
- "bad header in inode #%lu: %s - magic %x, "
- "entries %u, max %u, depth %u",
- inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
- le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
- le16_to_cpu(eh->eh_depth));
-
- return -EIO;
-}
-
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
int err;
@@ -269,6 +240,70 @@ static int ext4_ext_space_root_idx(struct inode *inode)
return size;
}
+static int
+ext4_ext_max_entries(struct inode *inode, int depth)
+{
+ int max;
+
+ if (depth == ext_depth(inode)) {
+ if (depth == 0)
+ max = ext4_ext_space_root(inode);
+ else
+ max = ext4_ext_space_root_idx(inode);
+ } else {
+ if (depth == 0)
+ max = ext4_ext_space_block(inode);
+ else
+ max = ext4_ext_space_block_idx(inode);
+ }
+
+ return max;
+}
+
+static int __ext4_ext_check_header(const char *function, struct inode *inode,
+ struct ext4_extent_header *eh,
+ int depth)
+{
+ const char *error_msg;
+ int max = 0;
+
+ if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
+ error_msg = "invalid magic";
+ goto corrupted;
+ }
+ if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
+ error_msg = "unexpected eh_depth";
+ goto corrupted;
+ }
+ if (unlikely(eh->eh_max == 0)) {
+ error_msg = "invalid eh_max";
+ goto corrupted;
+ }
+ max = ext4_ext_max_entries(inode, depth);
+ if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
+ error_msg = "too large eh_max";
+ goto corrupted;
+ }
+ if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
+ error_msg = "invalid eh_entries";
+ goto corrupted;
+ }
+ return 0;
+
+corrupted:
+ ext4_error(inode->i_sb, function,
+ "bad header in inode #%lu: %s - magic %x, "
+ "entries %u, max %u(%u), depth %u(%u)",
+ inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+ max, le16_to_cpu(eh->eh_depth), depth);
+
+ return -EIO;
+}
+
+#define ext4_ext_check_header(inode, eh, depth) \
+ __ext4_ext_check_header(__FUNCTION__, inode, eh, depth)
+
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
@@ -282,7 +317,7 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
} else if (path->p_ext) {
ext_debug(" %d:%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
- le16_to_cpu(path->p_ext->ee_len),
+ ext4_ext_get_actual_len(path->p_ext),
ext_pblock(path->p_ext));
} else
ext_debug(" []");
@@ -305,7 +340,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
- le16_to_cpu(ex->ee_len), ext_pblock(ex));
+ ext4_ext_get_actual_len(ex), ext_pblock(ex));
}
ext_debug("\n");
}
@@ -329,6 +364,7 @@ static void ext4_ext_drop_refs(struct ext4_ext_path *path)
/*
* ext4_ext_binsearch_idx:
* binary search for the closest index of the given block
+ * the header must be checked before calling this
*/
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
@@ -336,27 +372,25 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent_idx *r, *l, *m;
- BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
- BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
- BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);
ext_debug("binsearch for %d(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
- r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
+ r = EXT_LAST_INDEX(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ei_block))
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
- m, m->ei_block, r, r->ei_block);
+ ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
+ m, le32_to_cpu(m->ei_block),
+ r, le32_to_cpu(r->ei_block));
}
path->p_idx = l - 1;
ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
- idx_block(path->p_idx));
+ idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
{
@@ -388,6 +422,7 @@ ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int bloc
/*
* ext4_ext_binsearch:
* binary search for closest extent of the given block
+ * the header must be checked before calling this
*/
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
@@ -395,9 +430,6 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent *r, *l, *m;
- BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
- BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
-
if (eh->eh_entries == 0) {
/*
* this leaf is empty:
@@ -409,7 +441,7 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
ext_debug("binsearch for %d: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
- r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
+ r = EXT_LAST_EXTENT(eh);
while (l <= r) {
m = l + (r - l) / 2;
@@ -417,15 +449,16 @@ ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
- m, m->ee_block, r, r->ee_block);
+ ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
+ m, le32_to_cpu(m->ee_block),
+ r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
ext_debug(" -> %d:%llu:%d ",
le32_to_cpu(path->p_ext->ee_block),
ext_pblock(path->p_ext),
- le16_to_cpu(path->p_ext->ee_len));
+ ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
{
@@ -468,11 +501,10 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
short int depth, i, ppos = 0, alloc = 0;
eh = ext_inode_hdr(inode);
- BUG_ON(eh == NULL);
- if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+ depth = ext_depth(inode);
+ if (ext4_ext_check_header(inode, eh, depth))
return ERR_PTR(-EIO);
- i = depth = ext_depth(inode);
/* account possible depth increase */
if (!path) {
@@ -484,10 +516,12 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
}
path[0].p_hdr = eh;
+ i = depth;
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+
ext4_ext_binsearch_idx(inode, path + ppos, block);
path[ppos].p_block = idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
@@ -504,7 +538,7 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
path[ppos].p_hdr = eh;
i--;
- if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+ if (ext4_ext_check_header(inode, eh, i))
goto err;
}
@@ -513,9 +547,6 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
path[ppos].p_ext = NULL;
path[ppos].p_idx = NULL;
- if (ext4_ext_check_header(__FUNCTION__, inode, eh))
- goto err;
-
/* find extent */
ext4_ext_binsearch(inode, path + ppos, block);
@@ -553,7 +584,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
len = (len - 1) * sizeof(struct ext4_extent_idx);
len = len < 0 ? 0 : len;
- ext_debug("insert new index %d after: %d. "
+ ext_debug("insert new index %d after: %llu. "
"move %d from 0x%p to 0x%p\n",
logical, ptr, len,
(curp->p_idx + 1), (curp->p_idx + 2));
@@ -564,7 +595,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
/* insert before */
len = len * sizeof(struct ext4_extent_idx);
len = len < 0 ? 0 : len;
- ext_debug("insert new index %d before: %d. "
+ ext_debug("insert new index %d before: %llu. "
"move %d from 0x%p to 0x%p\n",
logical, ptr, len,
curp->p_idx, (curp->p_idx + 1));
@@ -686,7 +717,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
ext_debug("move %d:%llu:%d in new leaf %llu\n",
le32_to_cpu(path[depth].p_ext->ee_block),
ext_pblock(path[depth].p_ext),
- le16_to_cpu(path[depth].p_ext->ee_len),
+ ext4_ext_get_actual_len(path[depth].p_ext),
newblock);
/*memmove(ex++, path[depth].p_ext++,
sizeof(struct ext4_extent));
@@ -764,7 +795,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
EXT_LAST_INDEX(path[i].p_hdr));
while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
- ext_debug("%d: move %d:%d in new index %llu\n", i,
+ ext_debug("%d: move %d:%llu in new index %llu\n", i,
le32_to_cpu(path[i].p_idx->ei_block),
idx_pblock(path[i].p_idx),
newblock);
@@ -893,8 +924,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
curp->p_hdr->eh_entries = cpu_to_le16(1);
curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
- /* FIXME: it works, but actually path[0] can be index */
- curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
+
+ if (path[0].p_hdr->eh_depth)
+ curp->p_idx->ei_block =
+ EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
+ else
+ curp->p_idx->ei_block =
+ EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
ext4_idx_store_pblock(curp->p_idx, newblock);
neh = ext_inode_hdr(inode);
@@ -1106,7 +1142,24 @@ static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
struct ext4_extent *ex2)
{
- if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
+ unsigned short ext1_ee_len, ext2_ee_len, max_len;
+
+ /*
+ * Make sure that either both extents are uninitialized, or
+ * both are _not_.
+ */
+ if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
+ return 0;
+
+ if (ext4_ext_is_uninitialized(ex1))
+ max_len = EXT_UNINIT_MAX_LEN;
+ else
+ max_len = EXT_INIT_MAX_LEN;
+
+ ext1_ee_len = ext4_ext_get_actual_len(ex1);
+ ext2_ee_len = ext4_ext_get_actual_len(ex2);
+
+ if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
le32_to_cpu(ex2->ee_block))
return 0;
@@ -1115,19 +1168,66 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
* as an RO_COMPAT feature, refuse to merge to extents if
* this can result in the top bit of ee_len being set.
*/
- if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
+ if (ext1_ee_len + ext2_ee_len > max_len)
return 0;
#ifdef AGGRESSIVE_TEST
if (le16_to_cpu(ex1->ee_len) >= 4)
return 0;
#endif
- if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
+ if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
return 1;
return 0;
}
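+
+/*
+ * Example (assuming the usual limits EXT_INIT_MAX_LEN == 32768 and
+ * EXT_UNINIT_MAX_LEN == 32767): two contiguous initialized extents of,
+ * say, 20000 and 15000 blocks are left unmerged, since the combined
+ * length would not fit in ee_len without setting its top bit.
+ */
+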
/*
+ * This function tries to merge the "ex" extent with the next extent in the
+ * tree. It always merges towards the right. To merge towards the left,
+ * pass "ex - 1" as the argument instead of "ex".
+ * Returns 0 if the extents (ex and ex+1) were _not_ merged and
+ * 1 if they were.
+ */
+int ext4_ext_try_to_merge(struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *ex)
+{
+ struct ext4_extent_header *eh;
+ unsigned int depth, len;
+ int merge_done = 0;
+ int uninitialized = 0;
+
+ depth = ext_depth(inode);
+ BUG_ON(path[depth].p_hdr == NULL);
+ eh = path[depth].p_hdr;
+
+ while (ex < EXT_LAST_EXTENT(eh)) {
+ if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
+ break;
+ /* merge with next extent! */
+ if (ext4_ext_is_uninitialized(ex))
+ uninitialized = 1;
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(ex + 1));
+ if (uninitialized)
+ ext4_ext_mark_uninitialized(ex);
+
+ if (ex + 1 < EXT_LAST_EXTENT(eh)) {
+ len = (EXT_LAST_EXTENT(eh) - ex - 1)
+ * sizeof(struct ext4_extent);
+ memmove(ex + 1, ex + 2, len);
+ }
+ eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
+ merge_done = 1;
+ WARN_ON(eh->eh_entries == 0);
+ if (!eh->eh_entries)
+ ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
+ "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
+ }
+
+ return merge_done;
+}
+
+/*
* check if a portion of the "newext" extent overlaps with an
* existing extent.
*
@@ -1144,7 +1244,7 @@ unsigned int ext4_ext_check_overlap(struct inode *inode,
unsigned int ret = 0;
b1 = le32_to_cpu(newext->ee_block);
- len1 = le16_to_cpu(newext->ee_len);
+ len1 = ext4_ext_get_actual_len(newext);
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
@@ -1191,8 +1291,9 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
struct ext4_extent *nearex; /* nearest extent */
struct ext4_ext_path *npath = NULL;
int depth, len, err, next;
+ unsigned uninitialized = 0;
- BUG_ON(newext->ee_len == 0);
+ BUG_ON(ext4_ext_get_actual_len(newext) == 0);
depth = ext_depth(inode);
ex = path[depth].p_ext;
BUG_ON(path[depth].p_hdr == NULL);
@@ -1200,14 +1301,24 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
/* try to insert block into found extent and return */
if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
ext_debug("append %d block to %d:%d (from %llu)\n",
- le16_to_cpu(newext->ee_len),
+ ext4_ext_get_actual_len(newext),
le32_to_cpu(ex->ee_block),
- le16_to_cpu(ex->ee_len), ext_pblock(ex));
+ ext4_ext_get_actual_len(ex), ext_pblock(ex));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
return err;
- ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
- + le16_to_cpu(newext->ee_len));
+
+ /*
+ * ext4_can_extents_be_merged should have checked that either
+ * both extents are uninitialized, or both aren't. Thus we
+ * need to check only one of them here.
+ */
+ if (ext4_ext_is_uninitialized(ex))
+ uninitialized = 1;
+ ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
+ + ext4_ext_get_actual_len(newext));
+ if (uninitialized)
+ ext4_ext_mark_uninitialized(ex);
eh = path[depth].p_hdr;
nearex = ex;
goto merge;
@@ -1263,7 +1374,7 @@ has_space:
ext_debug("first extent in the leaf: %d:%llu:%d\n",
le32_to_cpu(newext->ee_block),
ext_pblock(newext),
- le16_to_cpu(newext->ee_len));
+ ext4_ext_get_actual_len(newext));
path[depth].p_ext = EXT_FIRST_EXTENT(eh);
} else if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
@@ -1276,7 +1387,7 @@ has_space:
"move %d from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext_pblock(newext),
- le16_to_cpu(newext->ee_len),
+ ext4_ext_get_actual_len(newext),
nearex, len, nearex + 1, nearex + 2);
memmove(nearex + 2, nearex + 1, len);
}
@@ -1289,7 +1400,7 @@ has_space:
"move %d from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext_pblock(newext),
- le16_to_cpu(newext->ee_len),
+ ext4_ext_get_actual_len(newext),
nearex, len, nearex + 1, nearex + 2);
memmove(nearex + 1, nearex, len);
path[depth].p_ext = nearex;
@@ -1304,20 +1415,7 @@ has_space:
merge:
/* try to merge extents to the right */
- while (nearex < EXT_LAST_EXTENT(eh)) {
- if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
- break;
- /* merge with next extent! */
- nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
- + le16_to_cpu(nearex[1].ee_len));
- if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
- len = (EXT_LAST_EXTENT(eh) - nearex - 1)
- * sizeof(struct ext4_extent);
- memmove(nearex + 1, nearex + 2, len);
- }
- eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
- BUG_ON(eh->eh_entries == 0);
- }
+ ext4_ext_try_to_merge(inode, path, nearex);
/* try to merge extents to the left */
@@ -1379,8 +1477,8 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
end = block + num;
- } else if (block >=
- le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
+ } else if (block >= le32_to_cpu(ex->ee_block)
+ + ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
start = block;
end = block + num;
@@ -1392,7 +1490,8 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
* by found extent
*/
start = block;
- end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
+ end = le32_to_cpu(ex->ee_block)
+ + ext4_ext_get_actual_len(ex);
if (block + num < end)
end = block + num;
exists = 1;
@@ -1408,7 +1507,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
cbex.ec_type = EXT4_EXT_CACHE_GAP;
} else {
cbex.ec_block = le32_to_cpu(ex->ee_block);
- cbex.ec_len = le16_to_cpu(ex->ee_len);
+ cbex.ec_len = ext4_ext_get_actual_len(ex);
cbex.ec_start = ext_pblock(ex);
cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
}
@@ -1481,15 +1580,15 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext_debug("cache gap(before): %lu [%lu:%lu]",
(unsigned long) block,
(unsigned long) le32_to_cpu(ex->ee_block),
- (unsigned long) le16_to_cpu(ex->ee_len));
+ (unsigned long) ext4_ext_get_actual_len(ex));
} else if (block >= le32_to_cpu(ex->ee_block)
- + le16_to_cpu(ex->ee_len)) {
+ + ext4_ext_get_actual_len(ex)) {
lblock = le32_to_cpu(ex->ee_block)
- + le16_to_cpu(ex->ee_len);
+ + ext4_ext_get_actual_len(ex);
len = ext4_ext_next_allocated_block(path);
ext_debug("cache gap(after): [%lu:%lu] %lu",
(unsigned long) le32_to_cpu(ex->ee_block),
- (unsigned long) le16_to_cpu(ex->ee_len),
+ (unsigned long) ext4_ext_get_actual_len(ex),
(unsigned long) block);
BUG_ON(len == lblock);
len = len - lblock;
@@ -1619,12 +1718,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
unsigned long from, unsigned long to)
{
struct buffer_head *bh;
+ unsigned short ee_len = ext4_ext_get_actual_len(ex);
int i;
#ifdef EXTENTS_STATS
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned short ee_len = le16_to_cpu(ex->ee_len);
spin_lock(&sbi->s_ext_stats_lock);
sbi->s_ext_blocks += ee_len;
sbi->s_ext_extents++;
@@ -1638,12 +1737,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
}
#endif
if (from >= le32_to_cpu(ex->ee_block)
- && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+ && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
/* tail removal */
unsigned long num;
ext4_fsblk_t start;
- num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
- start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
+ num = le32_to_cpu(ex->ee_block) + ee_len - from;
+ start = ext_pblock(ex) + ee_len - num;
ext_debug("free last %lu blocks starting %llu\n", num, start);
for (i = 0; i < num; i++) {
bh = sb_find_get_block(inode->i_sb, start + i);
@@ -1651,12 +1750,12 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
}
ext4_free_blocks(handle, inode, start, num);
} else if (from == le32_to_cpu(ex->ee_block)
- && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+ && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
printk("strange request: removal %lu-%lu from %u:%u\n",
- from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
} else {
printk("strange request: removal(2) %lu-%lu from %u:%u\n",
- from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+ from, to, le32_to_cpu(ex->ee_block), ee_len);
}
return 0;
}
@@ -1671,21 +1770,23 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
unsigned a, b, block, num;
unsigned long ex_ee_block;
unsigned short ex_ee_len;
+ unsigned uninitialized = 0;
struct ext4_extent *ex;
+ /* the header must already have been checked in ext4_ext_remove_space() */
ext_debug("truncate since %lu in leaf\n", start);
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
BUG_ON(eh == NULL);
- BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
- BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
/* find where to start removing */
ex = EXT_LAST_EXTENT(eh);
ex_ee_block = le32_to_cpu(ex->ee_block);
- ex_ee_len = le16_to_cpu(ex->ee_len);
+ if (ext4_ext_is_uninitialized(ex))
+ uninitialized = 1;
+ ex_ee_len = ext4_ext_get_actual_len(ex);
while (ex >= EXT_FIRST_EXTENT(eh) &&
ex_ee_block + ex_ee_len > start) {
@@ -1753,6 +1854,12 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ex->ee_block = cpu_to_le32(block);
ex->ee_len = cpu_to_le16(num);
+ /*
+ * Do not mark uninitialized if all the blocks in the
+ * extent have been removed.
+ */
+ if (uninitialized && num)
+ ext4_ext_mark_uninitialized(ex);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
@@ -1762,7 +1869,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
- ex_ee_len = le16_to_cpu(ex->ee_len);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
}
if (correct_index && eh->eh_entries)
@@ -1825,7 +1932,7 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
return -ENOMEM;
}
path[0].p_hdr = ext_inode_hdr(inode);
- if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
+ if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
err = -EIO;
goto out;
}
@@ -1846,17 +1953,8 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
if (!path[i].p_hdr) {
ext_debug("initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
- if (ext4_ext_check_header(__FUNCTION__, inode,
- path[i].p_hdr)) {
- err = -EIO;
- goto out;
- }
}
- BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
- > le16_to_cpu(path[i].p_hdr->eh_max));
- BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
-
if (!path[i].p_idx) {
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
@@ -1873,17 +1971,27 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
+ struct buffer_head *bh;
/* go to the next level */
ext_debug("move to level %d (block %llu)\n",
i + 1, idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
- path[i+1].p_bh =
- sb_bread(sb, idx_pblock(path[i].p_idx));
- if (!path[i+1].p_bh) {
+ bh = sb_bread(sb, idx_pblock(path[i].p_idx));
+ if (!bh) {
/* should we reset i_size? */
err = -EIO;
break;
}
+ if (WARN_ON(i + 1 > depth)) {
+ err = -EIO;
+ break;
+ }
+ if (ext4_ext_check_header(inode, ext_block_hdr(bh),
+ depth - i - 1)) {
+ err = -EIO;
+ break;
+ }
+ path[i + 1].p_bh = bh;
/* save actual number of indexes since this
* number is changed at the next iteration */
@@ -1977,15 +2085,158 @@ void ext4_ext_release(struct super_block *sb)
#endif
}
+/*
+ * This function is called by ext4_ext_get_blocks() if someone tries to write
+ * to an uninitialized extent. It may result in splitting the uninitialized
+ * extent into multiple extents (up to three: one initialized and two
+ * uninitialized).
+ * There are three possibilities:
+ * a> There is no split required: Entire extent should be initialized
+ * b> Splits into two extents: Write is happening at either end of the extent
+ * c> Splits into three extents: Someone is writing in the middle of the extent
+ */
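+/*
+ * Illustrative layout for case c> (hypothetical logical block numbers):
+ * writing blocks 4-5 of an uninitialized extent covering blocks 0-9 gives
+ *
+ *   [uninit 0-3][init 4-5][uninit 6-9]
+ *
+ * corresponding to ex1, ex2 and ex3 below.
+ */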
+int ext4_ext_convert_to_initialized(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path,
+ ext4_fsblk_t iblock,
+ unsigned long max_blocks)
+{
+ struct ext4_extent *ex, newex;
+ struct ext4_extent *ex1 = NULL;
+ struct ext4_extent *ex2 = NULL;
+ struct ext4_extent *ex3 = NULL;
+ struct ext4_extent_header *eh;
+ unsigned int allocated, ee_block, ee_len, depth;
+ ext4_fsblk_t newblock;
+ int err = 0;
+ int ret = 0;
+
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+ allocated = ee_len - (iblock - ee_block);
+ newblock = iblock - ee_block + ext_pblock(ex);
+ ex2 = ex;
+
+ /* ex1: ee_block to iblock - 1 : uninitialized */
+ if (iblock > ee_block) {
+ ex1 = ex;
+ ex1->ee_len = cpu_to_le16(iblock - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ex2 = &newex;
+ }
+ /*
+ * for sanity, update the length of the ex2 extent before
+ * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ * overlap of blocks.
+ */
+ if (!ex1 && allocated > max_blocks)
+ ex2->ee_len = cpu_to_le16(max_blocks);
+ /* ex3: iblock + max_blocks to ee_block + ee_len : uninitialized */
+ if (allocated > max_blocks) {
+ unsigned int newdepth;
+ ex3 = &newex;
+ ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+ ext4_ext_store_pblock(ex3, newblock + max_blocks);
+ ex3->ee_len = cpu_to_le16(allocated - max_blocks);
+ ext4_ext_mark_uninitialized(ex3);
+ err = ext4_ext_insert_extent(handle, inode, path, ex3);
+ if (err)
+ goto out;
+ /*
+ * The depth, and hence eh & ex might change
+ * as part of the insert above.
+ */
+ newdepth = ext_depth(inode);
+ if (newdepth != depth) {
+ depth = newdepth;
+ path = ext4_ext_find_extent(inode, iblock, NULL);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ path = NULL;
+ goto out;
+ }
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+ if (ex2 != &newex)
+ ex2 = ex;
+ }
+ allocated = max_blocks;
+ }
+ /*
+ * If there was a change of depth as part of the
+ * insertion of ex3 above, we need to update the length
+ * of the ex1 extent again here
+ */
+ if (ex1 && ex1 != ex) {
+ ex1 = ex;
+ ex1->ee_len = cpu_to_le16(iblock - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ex2 = &newex;
+ }
+ /* ex2: iblock to iblock + max_blocks - 1 : initialized */
+ ex2->ee_block = cpu_to_le32(iblock);
+ ex2->ee_start = cpu_to_le32(newblock);
+ ext4_ext_store_pblock(ex2, newblock);
+ ex2->ee_len = cpu_to_le16(allocated);
+ if (ex2 != ex)
+ goto insert;
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+ goto out;
+ /*
+ * New (initialized) extent starts from the first block
+ * in the current extent, i.e., ex2 == ex.
+ * We have to see if it can be merged with the extent
+ * on the left.
+ */
+ if (ex2 > EXT_FIRST_EXTENT(eh)) {
+ /*
+ * To merge left, pass "ex2 - 1" to try_to_merge(),
+ * since it merges towards right _only_.
+ */
+ ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
+ if (ret) {
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+ goto out;
+ depth = ext_depth(inode);
+ ex2--;
+ }
+ }
+ /*
+ * Try to merge towards the right. This might be required
+ * only when the whole extent is being written to,
+ * i.e. ex2 == ex and ex3 == NULL.
+ */
+ if (!ex3) {
+ ret = ext4_ext_try_to_merge(inode, path, ex2);
+ if (ret) {
+ err = ext4_ext_correct_indexes(handle, inode, path);
+ if (err)
+ goto out;
+ }
+ }
+ /* Mark modified extent as dirty */
+ err = ext4_ext_dirty(handle, inode, path + depth);
+ goto out;
+insert:
+ err = ext4_ext_insert_extent(handle, inode, path, &newex);
+out:
+ return err ? err : allocated;
+}
+
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
ext4_fsblk_t iblock,
unsigned long max_blocks, struct buffer_head *bh_result,
int create, int extend_disksize)
{
struct ext4_ext_path *path = NULL;
+ struct ext4_extent_header *eh;
struct ext4_extent newex, *ex;
ext4_fsblk_t goal, newblock;
- int err = 0, depth;
+ int err = 0, depth, ret;
unsigned long allocated = 0;
__clear_bit(BH_New, &bh_result->b_state);
@@ -1998,8 +2249,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
if (goal) {
if (goal == EXT4_EXT_CACHE_GAP) {
if (!create) {
- /* block isn't allocated yet and
- * user doesn't want to allocate it */
+ /*
+ * block isn't allocated yet and
+ * user doesn't want to allocate it
+ */
goto out2;
}
/* we should allocate requested block */
@@ -2033,21 +2286,19 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
* this is why assert can't be put in ext4_ext_find_extent()
*/
BUG_ON(path[depth].p_ext == NULL && depth != 0);
+ eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (ex) {
unsigned long ee_block = le32_to_cpu(ex->ee_block);
ext4_fsblk_t ee_start = ext_pblock(ex);
- unsigned short ee_len = le16_to_cpu(ex->ee_len);
+ unsigned short ee_len;
/*
- * Allow future support for preallocated extents to be added
- * as an RO_COMPAT feature:
* Uninitialized extents are treated as holes, except that
- * we avoid (fail) allocating new blocks during a write.
+ * we split out initialized portions during a write.
*/
- if (ee_len > EXT_MAX_LEN)
- goto out2;
+ ee_len = ext4_ext_get_actual_len(ex);
/* if found extent covers block, simply return it */
if (iblock >= ee_block && iblock < ee_block + ee_len) {
newblock = iblock - ee_block + ee_start;
@@ -2055,9 +2306,27 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
allocated = ee_len - (iblock - ee_block);
ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
ee_block, ee_len, newblock);
- ext4_ext_put_in_cache(inode, ee_block, ee_len,
- ee_start, EXT4_EXT_CACHE_EXTENT);
- goto out;
+
+ /* Do not put uninitialized extent in the cache */
+ if (!ext4_ext_is_uninitialized(ex)) {
+ ext4_ext_put_in_cache(inode, ee_block,
+ ee_len, ee_start,
+ EXT4_EXT_CACHE_EXTENT);
+ goto out;
+ }
+ if (create == EXT4_CREATE_UNINITIALIZED_EXT)
+ goto out;
+ if (!create)
+ goto out2;
+
+ ret = ext4_ext_convert_to_initialized(handle, inode,
+ path, iblock,
+ max_blocks);
+ if (ret <= 0)
+ goto out2;
+ else
+ allocated = ret;
+ goto outnew;
}
}
@@ -2066,8 +2335,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
* we couldn't try to create block if create flag is zero
*/
if (!create) {
- /* put just found gap into cache to speed up
- * subsequent requests */
+ /*
+ * put just found gap into cache to speed up
+ * subsequent requests
+ */
ext4_ext_put_gap_in_cache(inode, path, iblock);
goto out2;
}
@@ -2081,6 +2352,19 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* allocate new block */
goal = ext4_ext_find_goal(inode, path, iblock);
+ /*
+ * See if the request is beyond the maximum number of blocks we can have in
+ * a single extent. For an initialized extent this limit is
+ * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
+ * EXT_UNINIT_MAX_LEN.
+ */
+ if (max_blocks > EXT_INIT_MAX_LEN &&
+ create != EXT4_CREATE_UNINITIALIZED_EXT)
+ max_blocks = EXT_INIT_MAX_LEN;
+ else if (max_blocks > EXT_UNINIT_MAX_LEN &&
+ create == EXT4_CREATE_UNINITIALIZED_EXT)
+ max_blocks = EXT_UNINIT_MAX_LEN;
+
/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
newex.ee_block = cpu_to_le32(iblock);
newex.ee_len = cpu_to_le16(max_blocks);
@@ -2098,6 +2382,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock);
newex.ee_len = cpu_to_le16(allocated);
+ if (create == EXT4_CREATE_UNINITIALIZED_EXT) /* Mark uninitialized */
+ ext4_ext_mark_uninitialized(&newex);
err = ext4_ext_insert_extent(handle, inode, path, &newex);
if (err) {
/* free data blocks we just allocated */
@@ -2111,10 +2397,13 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* previous routine could use block we allocated */
newblock = ext_pblock(&newex);
+outnew:
__set_bit(BH_New, &bh_result->b_state);
- ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
- EXT4_EXT_CACHE_EXTENT);
+ /* Cache only when it is _not_ an uninitialized extent */
+ if (create != EXT4_CREATE_UNINITIALIZED_EXT)
+ ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
+ EXT4_EXT_CACHE_EXTENT);
out:
if (allocated > max_blocks)
allocated = max_blocks;
@@ -2178,7 +2467,8 @@ void ext4_ext_truncate(struct inode * inode, struct page *page)
err = ext4_ext_remove_space(inode, last_block);
/* In a multi-transaction truncate, we only make the final
- * transaction synchronous. */
+ * transaction synchronous.
+ */
if (IS_SYNC(inode))
handle->h_sync = 1;
@@ -2217,3 +2507,127 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
return needed;
}
+
+/*
+ * Preallocate space for a file. This implements ext4's fallocate inode
+ * operation, which is called from the sys_fallocate system call.
+ * For block-mapped files, posix_fallocate should fall back to writing
+ * zeroes to the required new blocks (the same behavior expected of file
+ * systems that do not support the fallocate() system call).
+ */
+long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
+{
+ handle_t *handle;
+ ext4_fsblk_t block, max_blocks;
+ ext4_fsblk_t nblocks = 0;
+ int ret = 0;
+ int ret2 = 0;
+ int retries = 0;
+ struct buffer_head map_bh;
+ unsigned int credits, blkbits = inode->i_blkbits;
+
+ /*
+ * currently supporting (pre)allocate mode for extent-based
+ * files _only_
+ */
+ if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
+ return -EOPNOTSUPP;
+
+ /* preallocation to directories is currently not supported */
+ if (S_ISDIR(inode->i_mode))
+ return -ENODEV;
+
+ block = offset >> blkbits;
+ max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
+ - block;
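+ /*
+ * e.g. with 4 KiB blocks (blkbits == 12), offset == 6144 and
+ * len == 10240 give block == 1 and max_blocks == 3 (illustrative
+ * numbers only).
+ */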
+
+ /*
+ * credits to insert 1 extent into extent tree + buffers to be able to
+ * modify 1 super block, 1 block bitmap and 1 group descriptor.
+ */
+ credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
+retry:
+ while (ret >= 0 && ret < max_blocks) {
+ block = block + ret;
+ max_blocks = max_blocks - ret;
+ handle = ext4_journal_start(inode, credits);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ break;
+ }
+
+ ret = ext4_ext_get_blocks(handle, inode, block,
+ max_blocks, &map_bh,
+ EXT4_CREATE_UNINITIALIZED_EXT, 0);
+ WARN_ON(!ret);
+ if (!ret) {
+ ext4_error(inode->i_sb, "ext4_fallocate",
+ "ext4_ext_get_blocks returned 0! inode#%lu"
+ ", block=%llu, max_blocks=%llu",
+ inode->i_ino, block, max_blocks);
+ ret = -EIO;
+ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ break;
+ }
+ if (ret > 0) {
+ /* check wrap through sign-bit/zero here */
+ if ((block + ret) < 0 || (block + ret) < block) {
+ ret = -EIO;
+ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ break;
+ }
+ if (buffer_new(&map_bh) && ((block + ret) >
+ (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
+ >> blkbits)))
+ nblocks = nblocks + ret;
+ }
+
+ /* Update ctime if new blocks get allocated */
+ if (nblocks) {
+ struct timespec now;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_ctime, &now))
+ inode->i_ctime = now;
+ }
+
+ ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_journal_stop(handle);
+ if (ret2)
+ break;
+ }
+
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ /*
+ * Time to update the file size.
+ * Update only when preallocation was requested beyond the file size.
+ */
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ (offset + len) > i_size_read(inode)) {
+ if (ret > 0) {
+ /*
+ * if no error, we assume preallocation succeeded
+ * completely
+ */
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, offset + len);
+ EXT4_I(inode)->i_disksize = i_size_read(inode);
+ mutex_unlock(&inode->i_mutex);
+ } else if (ret < 0 && nblocks) {
+ /* Handle partial allocation scenario */
+ loff_t newsize;
+
+ mutex_lock(&inode->i_mutex);
+ newsize = (nblocks << blkbits) + i_size_read(inode);
+ i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
+ EXT4_I(inode)->i_disksize = i_size_read(inode);
+ mutex_unlock(&inode->i_mutex);
+ }
+ }
+
+ return ret > 0 ? ret2 : ret;
+}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d4c8186aed6..1a81cd66d63 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -134,5 +134,6 @@ const struct inode_operations ext4_file_inode_operations = {
.removexattr = generic_removexattr,
#endif
.permission = ext4_permission,
+ .fallocate = ext4_fallocate,
};
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index c88b439ba5c..427f83066a0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -563,7 +563,8 @@ got:
inode->i_ino = ino;
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
+ ext4_current_time(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
@@ -595,9 +596,8 @@ got:
spin_unlock(&sbi->s_next_gen_lock);
ei->i_state = EXT4_STATE_NEW;
- ei->i_extra_isize =
- (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) ?
- sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE : 0;
+
+ ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
ret = inode;
if(DQUOT_ALLOC_INODE(inode)) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8416fa28c42..de26c25d6a1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -726,7 +726,7 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
/* We are done with atomic stuff, now do the rest of housekeeping */
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
/* had we spliced it onto indirect block? */
@@ -1766,7 +1766,6 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
struct inode *inode = mapping->host;
struct buffer_head *bh;
int err = 0;
- void *kaddr;
blocksize = inode->i_sb->s_blocksize;
length = blocksize - (offset & (blocksize - 1));
@@ -1778,10 +1777,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
*/
if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
ext4_should_writeback_data(inode) && PageUptodate(page)) {
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, length, KM_USER0);
set_page_dirty(page);
goto unlock;
}
@@ -1834,10 +1830,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
goto unlock;
}
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + offset, 0, length);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, length, KM_USER0);
BUFFER_TRACE(bh, "zeroed end of block");
@@ -2375,7 +2368,7 @@ do_indirects:
ext4_discard_reservation(inode);
mutex_unlock(&ei->truncate_mutex);
- inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
/*
@@ -2583,6 +2576,25 @@ void ext4_set_inode_flags(struct inode *inode)
inode->i_flags |= S_DIRSYNC;
}
+/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+void ext4_get_inode_flags(struct ext4_inode_info *ei)
+{
+ unsigned int flags = ei->vfs_inode.i_flags;
+
+ ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+ EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
+ if (flags & S_SYNC)
+ ei->i_flags |= EXT4_SYNC_FL;
+ if (flags & S_APPEND)
+ ei->i_flags |= EXT4_APPEND_FL;
+ if (flags & S_IMMUTABLE)
+ ei->i_flags |= EXT4_IMMUTABLE_FL;
+ if (flags & S_NOATIME)
+ ei->i_flags |= EXT4_NOATIME_FL;
+ if (flags & S_DIRSYNC)
+ ei->i_flags |= EXT4_DIRSYNC_FL;
+}
+
void ext4_read_inode(struct inode * inode)
{
struct ext4_iloc iloc;
@@ -2610,10 +2622,6 @@ void ext4_read_inode(struct inode * inode)
}
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
- inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
- inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
ei->i_state = 0;
ei->i_dir_start_lookup = 0;
@@ -2691,6 +2699,11 @@ void ext4_read_inode(struct inode * inode)
} else
ei->i_extra_isize = 0;
+ EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
+ EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+ EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
+
if (S_ISREG(inode->i_mode)) {
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
@@ -2744,6 +2757,7 @@ static int ext4_do_update_inode(handle_t *handle,
if (ei->i_state & EXT4_STATE_NEW)
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+ ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
if(!(test_opt(inode->i_sb, NO_UID32))) {
raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
@@ -2771,9 +2785,12 @@ static int ext4_do_update_inode(handle_t *handle,
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(ei->i_disksize);
- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+
+ EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
+
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
@@ -3082,6 +3099,39 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
}
/*
+ * Expand an inode by new_extra_isize bytes.
+ * Returns 0 on success or negative error number on failure.
+ */
+int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize,
+ struct ext4_iloc iloc, handle_t *handle)
+{
+ struct ext4_inode *raw_inode;
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_entry *entry;
+
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
+ return 0;
+
+ raw_inode = ext4_raw_inode(&iloc);
+
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+
+ /* No extended attributes present */
+ if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
+ header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
+ new_extra_isize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ return 0;
+ }
+
+ /* try to expand with EAs present */
+ return ext4_expand_extra_isize_ea(inode, new_extra_isize,
+ raw_inode, handle);
+}
+
+/*
* What we do here is to mark the in-core inode as clean with respect to inode
* dirtiness (it may still be data-dirty).
* This means that the in-core inode may be reaped by prune_icache
@@ -3105,10 +3155,38 @@ ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
struct ext4_iloc iloc;
- int err;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ static unsigned int mnt_count;
+ int err, ret;
might_sleep();
err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
+ /*
+ * We need extra buffer credits since we may write into EA block
+ * with this same handle. If journal_extend fails, then it will
+ * only result in a minor loss of functionality for that inode.
+ * If this is felt to be critical, then e2fsck should be run to
+ * force a large enough s_min_extra_isize.
+ */
+ if ((jbd2_journal_extend(handle,
+ EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
+ ret = ext4_expand_extra_isize(inode,
+ sbi->s_want_extra_isize,
+ iloc, handle);
+ if (ret) {
+ EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
+ if (mnt_count != sbi->s_es->s_mnt_count) {
+ ext4_warning(inode->i_sb, __FUNCTION__,
+ "Unable to expand inode %lu. Delete"
+ " some EAs or run e2fsck.",
+ inode->i_ino);
+ mnt_count = sbi->s_es->s_mnt_count;
+ }
+ }
+ }
+ }
if (!err)
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
return err;
@@ -3197,7 +3275,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
*/
journal = EXT4_JOURNAL(inode);
- if (is_journal_aborted(journal) || IS_RDONLY(inode))
+ if (is_journal_aborted(journal))
return -EROFS;
jbd2_journal_lock_updates(journal);
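
The EXT4_INODE_{GET,SET}_XTIME changes above store sub-second timestamps in the extra space of large on-disk inodes. A small user-space sketch of the presumed packing, assuming the conventional layout in which the low EXT4_EPOCH_BITS (taken as 2 here) bits of the extra 32-bit field extend the seconds epoch and the remaining bits carry tv_nsec; the constants are illustrative rather than copied from the ext4 headers:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define EPOCH_BITS 2                         /* assumed value of EXT4_EPOCH_BITS */
#define EPOCH_MASK ((1u << EPOCH_BITS) - 1)

/* Pack tv_nsec plus the extra epoch bits into one 32-bit on-disk field. */
static uint32_t encode_extra_time(const struct timespec *ts)
{
	uint32_t epoch = ((int64_t)ts->tv_sec >> 32) & EPOCH_MASK;

	return epoch | ((uint32_t)ts->tv_nsec << EPOCH_BITS);
}

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	printf("sec=%lld nsec=%ld extra=0x%08x\n",
	       (long long)now.tv_sec, now.tv_nsec, encode_extra_time(&now));
	return 0;
}

With EXT4_EPOCH_BITS == 2, the s_time_gran assignment added in the super.c hunk further down works out to 1 << 0 == 1, i.e. nanosecond granularity for filesystems whose inodes are larger than 128 bytes (older glibc needs -lrt for clock_gettime).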
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 7b4aa4543c8..c04c7ccba9e 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -28,6 +28,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
switch (cmd) {
case EXT4_IOC_GETFLAGS:
+ ext4_get_inode_flags(ei);
flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
return put_user(flags, (int __user *) arg);
case EXT4_IOC_SETFLAGS: {
@@ -96,7 +97,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
ei->i_flags = flags;
ext4_set_inode_flags(inode);
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
@@ -133,14 +134,14 @@ flags_err:
return PTR_ERR(handle);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
inode->i_generation = generation;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
}
ext4_journal_stop(handle);
return err;
}
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
case EXT4_IOC_WAIT_FOR_READONLY:
/*
* This is racy - by the time we're woken up and running,
@@ -282,7 +283,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC32_SETVERSION_OLD:
cmd = EXT4_IOC_SETVERSION_OLD;
break;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
case EXT4_IOC32_WAIT_FOR_READONLY:
cmd = EXT4_IOC_WAIT_FOR_READONLY;
break;
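
With ext4_get_inode_flags() called in the GETFLAGS path above, attribute bits changed through generic VFS interfaces are synced back into EXT4_I(inode)->i_flags before being reported. A hypothetical query from user space, assuming <linux/fs.h> provides FS_IOC_GETFLAGS (which uses the same ioctl number as EXT4_IOC_GETFLAGS):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>          /* FS_IOC_GETFLAGS, FS_NOATIME_FL, ... */

int main(int argc, char **argv)
{
	long flags = 0;
	int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("getflags");
		return 1;
	}
	printf("flags=0x%lx noatime=%d\n", flags, !!(flags & FS_NOATIME_FL));
	close(fd);
	return 0;
}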
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 2de339dd755..da224974af7 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1295,7 +1295,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+ dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
dir->i_version++;
ext4_mark_inode_dirty(handle, dir);
@@ -1629,6 +1629,35 @@ static int ext4_delete_entry (handle_t *handle,
return -ENOENT;
}
+/*
+ * The DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
+ * since the latter means the nlink count had previously been pinned to 1
+ * (i.e. it had already overflowed).
+ */
+static void ext4_inc_count(handle_t *handle, struct inode *inode)
+{
+ inc_nlink(inode);
+ if (is_dx(inode) && inode->i_nlink > 1) {
+ /* limit is 16-bit i_links_count */
+ if (inode->i_nlink >= EXT4_LINK_MAX || inode->i_nlink == 2) {
+ inode->i_nlink = 1;
+ EXT4_SET_RO_COMPAT_FEATURE(inode->i_sb,
+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK);
+ }
+ }
+}
+
+/*
+ * If a directory has nlink == 1, leave it at 1: this indicates that the
+ * directory has more than EXT4_LINK_MAX subdirectories.
+ */
+static void ext4_dec_count(handle_t *handle, struct inode *inode)
+{
+ drop_nlink(inode);
+ if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
+ inc_nlink(inode);
+}
+
+
static int ext4_add_nondir(handle_t *handle,
struct dentry *dentry, struct inode *inode)
{
@@ -1725,7 +1754,7 @@ static int ext4_mkdir(struct inode * dir, struct dentry * dentry, int mode)
struct ext4_dir_entry_2 * de;
int err, retries = 0;
- if (dir->i_nlink >= EXT4_LINK_MAX)
+ if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
retry:
@@ -1748,7 +1777,7 @@ retry:
inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
dir_block = ext4_bread (handle, inode, 0, 1, &err);
if (!dir_block) {
- drop_nlink(inode); /* is this nlink == 0? */
+ ext4_dec_count(handle, inode); /* is this nlink == 0? */
ext4_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
@@ -1780,7 +1809,7 @@ retry:
iput (inode);
goto out_stop;
}
- inc_nlink(dir);
+ ext4_inc_count(handle, dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
d_instantiate(dentry, inode);
@@ -2045,9 +2074,9 @@ static int ext4_rmdir (struct inode * dir, struct dentry *dentry)
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_rmdir;
- if (inode->i_nlink != 2)
+ if (!EXT4_DIR_LINK_EMPTY(inode))
ext4_warning (inode->i_sb, "ext4_rmdir",
- "empty directory has nlink!=2 (%d)",
+ "empty directory has too many links (%d)",
inode->i_nlink);
inode->i_version++;
clear_nlink(inode);
@@ -2056,9 +2085,9 @@ static int ext4_rmdir (struct inode * dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
- drop_nlink(dir);
+ ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
@@ -2106,13 +2135,13 @@ static int ext4_unlink(struct inode * dir, struct dentry *dentry)
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
- dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+ dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
- drop_nlink(inode);
+ ext4_dec_count(handle, inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime;
+ inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
retval = 0;
@@ -2159,7 +2188,7 @@ retry:
err = __page_symlink(inode, symname, l,
mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
if (err) {
- drop_nlink(inode);
+ ext4_dec_count(handle, inode);
ext4_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
@@ -2185,8 +2214,9 @@ static int ext4_link (struct dentry * old_dentry,
struct inode *inode = old_dentry->d_inode;
int err, retries = 0;
- if (inode->i_nlink >= EXT4_LINK_MAX)
+ if (EXT4_DIR_LINK_MAX(inode))
return -EMLINK;
+
/*
* Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
* otherwise has the potential to corrupt the orphan inode list.
@@ -2203,8 +2233,8 @@ retry:
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
- inode->i_ctime = CURRENT_TIME_SEC;
- inc_nlink(inode);
+ inode->i_ctime = ext4_current_time(inode);
+ ext4_inc_count(handle, inode);
atomic_inc(&inode->i_count);
err = ext4_add_nondir(handle, dentry, inode);
@@ -2305,7 +2335,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = CURRENT_TIME_SEC;
+ old_inode->i_ctime = ext4_current_time(old_inode);
ext4_mark_inode_dirty(handle, old_inode);
/*
@@ -2337,10 +2367,10 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
}
if (new_inode) {
- drop_nlink(new_inode);
- new_inode->i_ctime = CURRENT_TIME_SEC;
+ ext4_dec_count(handle, new_inode);
+ new_inode->i_ctime = ext4_current_time(new_inode);
}
- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+ old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
ext4_update_dx_flag(old_dir);
if (dir_bh) {
BUFFER_TRACE(dir_bh, "get_write_access");
@@ -2348,11 +2378,13 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata");
ext4_journal_dirty_metadata(handle, dir_bh);
- drop_nlink(old_dir);
+ ext4_dec_count(handle, old_dir);
if (new_inode) {
- drop_nlink(new_inode);
+ /* checked empty_dir above, can't have another parent,
+ * ext4_dec_count() won't work for many-linked dirs */
+ new_inode->i_nlink = 0;
} else {
- inc_nlink(new_dir);
+ ext4_inc_count(handle, new_dir);
ext4_update_dx_flag(new_dir);
ext4_mark_inode_dirty(handle, new_dir);
}
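
With the DIR_NLINK changes above, a directory whose link count would exceed EXT4_LINK_MAX has i_nlink pinned at 1 and the RO_COMPAT_DIR_NLINK feature set, so user space can no longer derive the subdirectory count from st_nlink. A hypothetical check (path argument assumed):

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) < 0) {
		perror("stat");
		return 1;
	}
	if (S_ISDIR(st.st_mode) && st.st_nlink == 1)
		printf("%s: link count overflowed, subdir count unknown\n", argv[1]);
	else
		printf("%s: nlink=%lu\n", argv[1], (unsigned long)st.st_nlink);
	return 0;
}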
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b806e689c4a..6dcbb28dc06 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -36,6 +36,7 @@
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
@@ -734,7 +735,7 @@ enum {
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_grpquota, Opt_extents,
+ Opt_grpquota, Opt_extents, Opt_noextents,
};
static match_table_t tokens = {
@@ -785,6 +786,7 @@ static match_table_t tokens = {
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
{Opt_extents, "extents"},
+ {Opt_noextents, "noextents"},
{Opt_err, NULL},
{Opt_resize, "resize"},
};
@@ -1120,6 +1122,9 @@ clear_qf_name:
case Opt_extents:
set_opt (sbi->s_mount_opt, EXTENTS);
break;
+ case Opt_noextents:
+ clear_opt (sbi->s_mount_opt, EXTENTS);
+ break;
default:
printk (KERN_ERR
"EXT4-fs: Unrecognized mount option \"%s\" "
@@ -1551,6 +1556,12 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, RESERVATION);
+ /*
+ * Turn on the extents feature by default in ext4.
+ * Use -o noextents to turn it off.
+ */
+ set_opt(sbi->s_mount_opt, EXTENTS);
+
if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
NULL, 0))
goto failed_mount;
@@ -1634,13 +1645,15 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
- (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
+ (!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
printk (KERN_ERR
"EXT4-fs: unsupported inode size: %d\n",
sbi->s_inode_size);
goto failed_mount;
}
+ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
+ sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
}
sbi->s_frag_size = EXT4_MIN_FRAG_SIZE <<
le32_to_cpu(es->s_log_frag_size);
@@ -1803,6 +1816,13 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount3;
}
+ if (ext4_blocks_count(es) > 0xffffffffULL &&
+ !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
+ JBD2_FEATURE_INCOMPAT_64BIT)) {
+ printk(KERN_ERR "ext4: Failed to set 64-bit journal feature\n");
+ goto failed_mount4;
+ }
+
/* We have now updated the journal if required, so we can
* validate the data journaling mode. */
switch (test_opt(sb, DATA_FLAGS)) {
@@ -1857,6 +1877,32 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
}
ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+
+ /* determine the minimum size of new large inodes, if present */
+ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_want_extra_isize))
+ sbi->s_want_extra_isize =
+ le16_to_cpu(es->s_want_extra_isize);
+ if (sbi->s_want_extra_isize <
+ le16_to_cpu(es->s_min_extra_isize))
+ sbi->s_want_extra_isize =
+ le16_to_cpu(es->s_min_extra_isize);
+ }
+ }
+ /* Check if enough inode space is available */
+ if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+ sbi->s_inode_size) {
+ sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
+ printk(KERN_INFO "EXT4-fs: required extra inode space not "
+ "available.\n");
+ }
+
/*
* akpm: core read_super() calls in here with the superblock locked.
* That deadlocks, because orphan cleanup needs to lock the superblock
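
The fill_super hunk above chooses s_want_extra_isize as the largest of the kernel's own sizeof(struct ext4_inode) - 128 and the superblock's s_want_extra_isize/s_min_extra_isize hints (when EXTRA_ISIZE is advertised), falling back when the on-disk inode is too small. A standalone sketch of that selection with illustrative numbers (156 below is a stand-in for sizeof(struct ext4_inode), not the real value):

#include <stdio.h>

#define GOOD_OLD_INODE_SIZE 128
#define KERNEL_INODE_SIZE   156   /* hypothetical sizeof(struct ext4_inode) */

static int want_extra_isize(int inode_size, int sb_want, int sb_min)
{
	int want = KERNEL_INODE_SIZE - GOOD_OLD_INODE_SIZE;

	if (want < sb_want)
		want = sb_want;
	if (want < sb_min)
		want = sb_min;
	/* Fall back if the on-disk inode cannot hold the extra fields. */
	if (GOOD_OLD_INODE_SIZE + want > inode_size)
		want = KERNEL_INODE_SIZE - GOOD_OLD_INODE_SIZE;
	return want;
}

int main(void)
{
	printf("256-byte inodes: want_extra_isize=%d\n",
	       want_extra_isize(256, 32, 0));
	return 0;
}

Mounting with -o noextents, per the new option above, is the corresponding way to opt out of the now-default extents feature.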
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e832e96095b..b10d68fffb5 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -66,13 +66,6 @@
#define BFIRST(bh) ENTRY(BHDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-#define IHDR(inode, raw_inode) \
- ((struct ext4_xattr_ibody_header *) \
- ((void *)raw_inode + \
- EXT4_GOOD_OLD_INODE_SIZE + \
- EXT4_I(inode)->i_extra_isize))
-#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
-
#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
printk(KERN_DEBUG "inode %s:%lu: ", \
@@ -508,6 +501,24 @@ out:
return;
}
+/*
+ * Find the available free space for EAs. This also returns the total number of
+ * bytes used by EA entries.
+ */
+static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
+ size_t *min_offs, void *base, int *total)
+{
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ *total += EXT4_XATTR_LEN(last->e_name_len);
+ if (!last->e_value_block && last->e_value_size) {
+ size_t offs = le16_to_cpu(last->e_value_offs);
+ if (offs < *min_offs)
+ *min_offs = offs;
+ }
+ }
+ return (*min_offs - ((void *)last - base) - sizeof(__u32));
+}
+
struct ext4_xattr_info {
int name_index;
const char *name;
@@ -1013,7 +1024,9 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
}
if (!error) {
ext4_xattr_update_super_block(handle, inode->i_sb);
- inode->i_ctime = CURRENT_TIME_SEC;
+ inode->i_ctime = ext4_current_time(inode);
+ if (!value)
+ EXT4_I(inode)->i_state &= ~EXT4_STATE_NO_EXPAND;
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1067,6 +1080,253 @@ retry:
}
/*
+ * Shift the EA entries in the inode to create space for the increased
+ * i_extra_isize.
+ */
+static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
+ int value_offs_shift, void *to,
+ void *from, size_t n, int blocksize)
+{
+ struct ext4_xattr_entry *last = entry;
+ int new_offs;
+
+ /* Adjust the value offsets of the entries */
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ if (!last->e_value_block && last->e_value_size) {
+ new_offs = le16_to_cpu(last->e_value_offs) +
+ value_offs_shift;
+ BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
+ > blocksize);
+ last->e_value_offs = cpu_to_le16(new_offs);
+ }
+ }
+ /* Shift the entries by n bytes */
+ memmove(to, from, n);
+}
+
+/*
+ * Expand an inode by new_extra_isize bytes when EAs are present.
+ * Returns 0 on success or negative error number on failure.
+ */
+int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle)
+{
+ struct ext4_xattr_ibody_header *header;
+ struct ext4_xattr_entry *entry, *last, *first;
+ struct buffer_head *bh = NULL;
+ struct ext4_xattr_ibody_find *is = NULL;
+ struct ext4_xattr_block_find *bs = NULL;
+ char *buffer = NULL, *b_entry_name = NULL;
+ size_t min_offs, free;
+ int total_ino, total_blk = 0;
+ void *base, *start, *end;
+ int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
+ int s_min_extra_isize = EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize;
+
+ down_write(&EXT4_I(inode)->xattr_sem);
+retry:
+ if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return 0;
+ }
+
+ header = IHDR(inode, raw_inode);
+ entry = IFIRST(header);
+
+ /*
+ * Check if enough free space is available in the inode to shift the
+ * entries ahead by new_extra_isize.
+ */
+
+ base = start = entry;
+ end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ min_offs = end - base;
+ last = entry;
+ total_ino = sizeof(struct ext4_xattr_ibody_header);
+
+ free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
+ if (free >= new_extra_isize) {
+ entry = IFIRST(header);
+ ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
+ - new_extra_isize, (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
+ (void *)header, total_ino,
+ inode->i_sb->s_blocksize);
+ EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ error = 0;
+ goto cleanup;
+ }
+
+ /*
+ * There isn't enough free space in the inode; check whether the
+ * EA block can hold new_extra_isize bytes.
+ */
+ if (EXT4_I(inode)->i_file_acl) {
+ bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+ error = -EIO;
+ if (!bh)
+ goto cleanup;
+ if (ext4_xattr_check_block(bh)) {
+ ext4_error(inode->i_sb, __FUNCTION__,
+ "inode %lu: bad block %llu", inode->i_ino,
+ EXT4_I(inode)->i_file_acl);
+ error = -EIO;
+ goto cleanup;
+ }
+ base = BHDR(bh);
+ first = BFIRST(bh);
+ end = bh->b_data + bh->b_size;
+ min_offs = end - base;
+ free = ext4_xattr_free_space(first, &min_offs, base,
+ &total_blk);
+ if (free < new_extra_isize) {
+ if (!tried_min_extra_isize && s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
+ brelse(bh);
+ goto retry;
+ }
+ error = -1;
+ goto cleanup;
+ }
+ } else {
+ free = inode->i_sb->s_blocksize;
+ }
+
+ while (new_extra_isize > 0) {
+ size_t offs, size, entry_size;
+ struct ext4_xattr_entry *small_entry = NULL;
+ struct ext4_xattr_info i = {
+ .value = NULL,
+ .value_len = 0,
+ };
+ unsigned int total_size; /* EA entry size + value size */
+ unsigned int shift_bytes; /* No. of bytes to shift EAs by? */
+ unsigned int min_total_size = ~0U;
+
+ is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+ if (!is || !bs) {
+ error = -ENOMEM;
+ goto cleanup;
+ }
+
+ is->s.not_found = -ENODATA;
+ bs->s.not_found = -ENODATA;
+ is->iloc.bh = NULL;
+ bs->bh = NULL;
+
+ last = IFIRST(header);
+ /* Find the entry best suited to be pushed into EA block */
+ entry = NULL;
+ for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ total_size =
+ EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
+ EXT4_XATTR_LEN(last->e_name_len);
+ if (total_size <= free && total_size < min_total_size) {
+ if (total_size < new_extra_isize) {
+ small_entry = last;
+ } else {
+ entry = last;
+ min_total_size = total_size;
+ }
+ }
+ }
+
+ if (entry == NULL) {
+ if (small_entry) {
+ entry = small_entry;
+ } else {
+ if (!tried_min_extra_isize &&
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
+ goto retry;
+ }
+ error = -1;
+ goto cleanup;
+ }
+ }
+ offs = le16_to_cpu(entry->e_value_offs);
+ size = le32_to_cpu(entry->e_value_size);
+ entry_size = EXT4_XATTR_LEN(entry->e_name_len);
+ i.name_index = entry->e_name_index;
+ buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
+ b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+ if (!buffer || !b_entry_name) {
+ error = -ENOMEM;
+ goto cleanup;
+ }
+ /* Save the entry name and the entry value */
+ memcpy(buffer, (void *)IFIRST(header) + offs,
+ EXT4_XATTR_SIZE(size));
+ memcpy(b_entry_name, entry->e_name, entry->e_name_len);
+ b_entry_name[entry->e_name_len] = '\0';
+ i.name = b_entry_name;
+
+ error = ext4_get_inode_loc(inode, &is->iloc);
+ if (error)
+ goto cleanup;
+
+ error = ext4_xattr_ibody_find(inode, &i, is);
+ if (error)
+ goto cleanup;
+
+ /* Remove the chosen entry from the inode */
+ error = ext4_xattr_ibody_set(handle, inode, &i, is);
+
+ entry = IFIRST(header);
+ if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
+ shift_bytes = new_extra_isize;
+ else
+ shift_bytes = entry_size + size;
+ /* Adjust the offsets and shift the remaining entries ahead */
+ ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
+ shift_bytes, (void *)raw_inode +
+ EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
+ (void *)header, total_ino - entry_size,
+ inode->i_sb->s_blocksize);
+
+ extra_isize += shift_bytes;
+ new_extra_isize -= shift_bytes;
+ EXT4_I(inode)->i_extra_isize = extra_isize;
+
+ i.name = b_entry_name;
+ i.value = buffer;
+ i.value_len = size;
+ error = ext4_xattr_block_find(inode, &i, bs);
+ if (error)
+ goto cleanup;
+
+ /* Add entry which was removed from the inode into the block */
+ error = ext4_xattr_block_set(handle, inode, &i, bs);
+ if (error)
+ goto cleanup;
+ kfree(b_entry_name);
+ kfree(buffer);
+ brelse(is->iloc.bh);
+ kfree(is);
+ kfree(bs);
+ }
+ brelse(bh);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return 0;
+
+cleanup:
+ kfree(b_entry_name);
+ kfree(buffer);
+ if (is)
+ brelse(is->iloc.bh);
+ kfree(is);
+ kfree(bs);
+ brelse(bh);
+ up_write(&EXT4_I(inode)->xattr_sem);
+ return error;
+}
+
+/*
* ext4_xattr_delete_inode()
*
* Free extended attribute resources associated with this inode. This
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 79432b35398..d7f5d6a1265 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -56,6 +56,13 @@ struct ext4_xattr_entry {
#define EXT4_XATTR_SIZE(size) \
(((size) + EXT4_XATTR_ROUND) & ~EXT4_XATTR_ROUND)
+#define IHDR(inode, raw_inode) \
+ ((struct ext4_xattr_ibody_header *) \
+ ((void *)raw_inode + \
+ EXT4_GOOD_OLD_INODE_SIZE + \
+ EXT4_I(inode)->i_extra_isize))
+#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+
# ifdef CONFIG_EXT4DEV_FS_XATTR
extern struct xattr_handler ext4_xattr_user_handler;
@@ -74,6 +81,9 @@ extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *,
extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
extern void ext4_xattr_put_super(struct super_block *);
+extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle);
+
extern int init_ext4_xattr(void);
extern void exit_ext4_xattr(void);
@@ -129,6 +139,13 @@ exit_ext4_xattr(void)
{
}
+static inline int
+ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ struct ext4_inode *raw_inode, handle_t *handle)
+{
+ return -EOPNOTSUPP;
+}
+
#define ext4_xattr_handlers NULL
# endif /* CONFIG_EXT4DEV_FS_XATTR */
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 78d63b818f0..f290cb7cb83 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -528,7 +529,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
spin_lock(&journal->j_state_lock);
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
@@ -1709,7 +1710,7 @@ void jbd2_slab_free(void *ptr, size_t size)
* Journal_head storage management
*/
static struct kmem_cache *jbd2_journal_head_cache;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
@@ -1747,7 +1748,7 @@ static struct journal_head *journal_alloc_journal_head(void)
struct journal_head *ret;
static unsigned long last_warning;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
atomic_inc(&nr_journal_heads);
#endif
ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@@ -1768,7 +1769,7 @@ static struct journal_head *journal_alloc_journal_head(void)
static void journal_free_journal_head(struct journal_head *jh)
{
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
atomic_dec(&nr_journal_heads);
memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
@@ -1951,64 +1952,50 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
}
/*
- * /proc tunables
+ * debugfs tunables
*/
-#if defined(CONFIG_JBD_DEBUG)
-int jbd2_journal_enable_debug;
+#if defined(CONFIG_JBD2_DEBUG)
+u8 jbd2_journal_enable_debug;
EXPORT_SYMBOL(jbd2_journal_enable_debug);
#endif
-#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
+#if defined(CONFIG_JBD2_DEBUG) && defined(CONFIG_DEBUG_FS)
-static struct proc_dir_entry *proc_jbd_debug;
+#define JBD2_DEBUG_NAME "jbd2-debug"
-static int read_jbd_debug(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int ret;
+struct dentry *jbd2_debugfs_dir, *jbd2_debug;
- ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
- *eof = 1;
- return ret;
+static void __init jbd2_create_debugfs_entry(void)
+{
+ jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL);
+ if (jbd2_debugfs_dir)
+ jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO,
+ jbd2_debugfs_dir,
+ &jbd2_journal_enable_debug);
}
-static int write_jbd_debug(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static void __exit jbd2_remove_debugfs_entry(void)
{
- char buf[32];
-
- if (count > ARRAY_SIZE(buf) - 1)
- count = ARRAY_SIZE(buf) - 1;
- if (copy_from_user(buf, buffer, count))
- return -EFAULT;
- buf[ARRAY_SIZE(buf) - 1] = '\0';
- jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
- return count;
+ if (jbd2_debug)
+ debugfs_remove(jbd2_debug);
+ if (jbd2_debugfs_dir)
+ debugfs_remove(jbd2_debugfs_dir);
}
-#define JBD_PROC_NAME "sys/fs/jbd2-debug"
+#else
-static void __init create_jbd_proc_entry(void)
+static void __init jbd2_create_debugfs_entry(void)
{
- proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL);
- if (proc_jbd_debug) {
- /* Why is this so hard? */
- proc_jbd_debug->read_proc = read_jbd_debug;
- proc_jbd_debug->write_proc = write_jbd_debug;
- }
+ do {
+ } while (0);
}
-static void __exit jbd2_remove_jbd_proc_entry(void)
+static void __exit jbd2_remove_debugfs_entry(void)
{
- if (proc_jbd_debug)
- remove_proc_entry(JBD_PROC_NAME, NULL);
+ do {
+ } while (0);
}
-#else
-
-#define create_jbd_proc_entry() do {} while (0)
-#define jbd2_remove_jbd_proc_entry() do {} while (0)
-
#endif
struct kmem_cache *jbd2_handle_cache;
@@ -2067,18 +2054,18 @@ static int __init journal_init(void)
ret = journal_init_caches();
if (ret != 0)
jbd2_journal_destroy_caches();
- create_jbd_proc_entry();
+ jbd2_create_debugfs_entry();
return ret;
}
static void __exit journal_exit(void)
{
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
int n = atomic_read(&nr_journal_heads);
if (n)
printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
#endif
- jbd2_remove_jbd_proc_entry();
+ jbd2_remove_debugfs_entry();
jbd2_journal_destroy_caches();
}
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 395c92a04ac..e7730a045b9 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -295,7 +295,7 @@ int jbd2_journal_skip_recovery(journal_t *journal)
printk(KERN_ERR "JBD: error %d scanning journal\n", err);
++journal->j_transaction_sequence;
} else {
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
#endif
jbd_debug(0,
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
index 352eb4a13f9..c4c36171240 100644
--- a/fs/ocfs2/heartbeat.c
+++ b/fs/ocfs2/heartbeat.c
@@ -209,7 +209,7 @@ void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
- ret = call_usermodehelper(argv[0], argv, envp, 1);
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
if (ret < 0)
mlog_errno(ret);
}
diff --git a/fs/open.c b/fs/open.c
index be6a457f422..a6b054edacb 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -26,6 +26,7 @@
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
+#include <linux/falloc.h>
int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -352,6 +353,64 @@ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
}
#endif
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len)
+{
+ struct file *file;
+ struct inode *inode;
+ long ret = -EINVAL;
+
+ if (offset < 0 || len <= 0)
+ goto out;
+
+ /* Return error if mode is not supported */
+ ret = -EOPNOTSUPP;
+ if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
+ goto out;
+
+ ret = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ if (!(file->f_mode & FMODE_WRITE))
+ goto out_fput;
+ /*
+ * Revalidate the write permissions, in case security policy has
+ * changed since the files were opened.
+ */
+ ret = security_file_permission(file, MAY_WRITE);
+ if (ret)
+ goto out_fput;
+
+ inode = file->f_path.dentry->d_inode;
+
+ ret = -ESPIPE;
+ if (S_ISFIFO(inode->i_mode))
+ goto out_fput;
+
+ ret = -ENODEV;
+ /*
+ * Let the individual filesystem decide whether it supports
+ * preallocation for directories or not.
+ */
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ goto out_fput;
+
+ ret = -EFBIG;
+ /* Check for wrap through zero too */
+ if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
+ goto out_fput;
+
+ if (inode->i_op && inode->i_op->fallocate)
+ ret = inode->i_op->fallocate(inode, mode, offset, len);
+ else
+ ret = -ENOSYS;
+
+out_fput:
+ fput(file);
+out:
+ return ret;
+}
+
/*
* access() needs to use the real uid/gid, not the effective uid/gid.
* We do this by temporarily clearing all FS-related capabilities and
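
sys_fallocate() takes (fd, mode, offset, len); per the checks above, the only mode bit accepted is FALLOC_FL_KEEP_SIZE, and mode 0 both allocates blocks and extends i_size. A hypothetical test for a 64-bit build, assuming the libc headers expose SYS_fallocate (loff_t arguments then fit in registers; the raw i386 calling convention for the 64-bit values differs, see the __NR_fallocate entry added in unistd.h further down):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open("testfile", O_CREAT | O_WRONLY, 0644);
	long ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate 1 MiB starting at offset 0; mode 0 also updates i_size. */
	ret = syscall(SYS_fallocate, fd, 0, (off_t)0, (off_t)(1 << 20));
	if (ret)
		perror("fallocate");
	close(fd);
	return 0;
}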
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 9e15ce0006e..36f310632c4 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,6 +41,7 @@ extern int irqbalance_disable(char *str);
extern void fixup_irqs(cpumask_t map);
#endif
+unsigned int do_IRQ(struct pt_regs *regs);
void init_IRQ(void);
void __init native_init_IRQ(void);
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index 7f161e760be..a90c7a60109 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,7 +1,7 @@
#ifndef _ASM_IRQ_VECTORS_LIMITS_H
#define _ASM_IRQ_VECTORS_LIMITS_H
-#ifdef CONFIG_X86_IO_APIC
+#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
#define NR_IRQS 224
# if (224 >= 32 * NR_CPUS)
# define NR_IRQ_VECTORS NR_IRQS
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 8198d1cca1f..7eb0b0b1fb3 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -32,6 +32,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#endif
}
+void leave_mm(unsigned long cpu);
+
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 7f846a7d6bc..7df88be2dd9 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -52,6 +52,8 @@ struct paravirt_ops
/* Basic arch-specific setup */
void (*arch_setup)(void);
char *(*memory_setup)(void);
+ void (*post_allocator_init)(void);
+
void (*init_IRQ)(void);
void (*time_init)(void);
@@ -116,7 +118,7 @@ struct paravirt_ops
u64 (*read_tsc)(void);
u64 (*read_pmc)(void);
- u64 (*get_scheduled_cycles)(void);
+ unsigned long long (*sched_clock)(void);
unsigned long (*get_cpu_khz)(void);
/* Segment descriptor handling */
@@ -173,7 +175,7 @@ struct paravirt_ops
unsigned long va);
/* Hooks for allocating/releasing pagetable pages */
- void (*alloc_pt)(u32 pfn);
+ void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
void (*alloc_pd)(u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
void (*release_pt)(u32 pfn);
@@ -260,6 +262,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len)
unsigned paravirt_patch_insns(void *site, unsigned len,
const char *start, const char *end);
+int paravirt_disable_iospace(void);
/*
* This generates an indirect call based on the operation type number.
@@ -563,7 +566,10 @@ static inline u64 paravirt_read_tsc(void)
#define rdtscll(val) (val = paravirt_read_tsc())
-#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+static inline unsigned long long paravirt_sched_clock(void)
+{
+ return PVOP_CALL0(unsigned long long, sched_clock);
+}
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -669,6 +675,12 @@ static inline void setup_secondary_clock(void)
}
#endif
+static inline void paravirt_post_allocator_init(void)
+{
+ if (paravirt_ops.post_allocator_init)
+ (*paravirt_ops.post_allocator_init)();
+}
+
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
if (paravirt_ops.pagetable_setup_start)
@@ -725,9 +737,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
}
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
- PVOP_VCALL1(alloc_pt, pfn);
+ PVOP_VCALL2(alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7afc269..f2fc33ceb9f 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(pfn) do { } while (0)
#define paravirt_alloc_pd(pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
#define pmd_populate_kernel(mm, pmd, pte) \
do { \
- paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
+ paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
} while (0)
#define pmd_populate(mm, pmd, pte) \
do { \
- paravirt_alloc_pt(page_to_pfn(pte)); \
+ paravirt_alloc_pt(mm, page_to_pfn(pte)); \
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT))); \
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 0d5bff9dc4a..7862fe858a9 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -81,6 +81,10 @@ void __init add_memory_region(unsigned long long start,
extern unsigned long init_pg_tables_end;
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init() do {} while (0)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0c713278706..1f73bde165b 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -43,9 +43,12 @@ extern u8 x86_cpu_to_apicid[];
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+extern void set_cpu_sibling_map(int cpu);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
#endif
struct smp_ops
@@ -129,6 +132,8 @@ extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern unsigned int num_processors;
+void __cpuinit smp_store_cpu_info(int id);
+
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_SMP */
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 153770e25fa..51a713e33a9 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -15,8 +15,38 @@ extern int no_sync_cmos_clock;
extern int recalibrate_cpu_khz(void);
#ifndef CONFIG_PARAVIRT
-#define get_scheduled_cycles(val) rdtscll(val)
#define calculate_cpu_khz() native_calculate_cpu_khz()
#endif
+/* Accelerators for sched_clock()
+ * convert from cycles (64 bits) => nanoseconds (64 bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We use a kHz divisor instead of MHz to keep better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+extern unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+
#endif
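
cyc2ns_scale itself is presumably initialized elsewhere (arch/i386/kernel/tsc.c) as roughly (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz; a standalone check of the fixed-point math with an illustrative 2 GHz cpu_khz value:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10

int main(void)
{
	unsigned long cpu_khz = 2000000;                 /* 2 GHz part */
	unsigned long scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cycles = 1000000ULL;          /* one million cycles */
	unsigned long long ns = (cycles * scale) >> CYC2NS_SCALE_FACTOR;

	/* 2 GHz -> 0.5 ns per cycle, so 1e6 cycles ~= 500000 ns. */
	printf("scale=%lu ns=%llu\n", scale, ns);
	return 0;
}

scale comes out to 512, so one million cycles map to 500000 ns, matching 0.5 ns per cycle.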
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index e84ace1ec8b..9b15545eb9b 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,10 +329,11 @@
#define __NR_signalfd 321
#define __NR_timerfd 322
#define __NR_eventfd 323
+#define __NR_fallocate 324
#ifdef __KERNEL__
-#define NR_syscalls 324
+#define NR_syscalls 325
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index 213930b995c..47818813032 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,7 @@ extern struct vmi_timer_ops {
extern void __init vmi_time_init(void);
extern unsigned long vmi_get_wallclock(void);
extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long long vmi_sched_clock(void);
extern unsigned long vmi_cpu_khz(void);
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h
new file mode 100644
index 00000000000..bc0ee7d961c
--- /dev/null
+++ b/include/asm-i386/xen/hypercall.h
@@ -0,0 +1,413 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERCALL_H__
+#define __HYPERCALL_H__
+
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/physdev.h>
+
+extern struct { char _entry[32]; } hypercall_page[];
+
+#define _hypercall0(type, name) \
+({ \
+ long __res; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res) \
+ : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+ long __res, __ign1; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1) \
+ : "1" ((long)(a1)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+ long __res, __ign1, __ign2; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+ long __res, __ign1, __ign2, __ign3; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+ long __res, __ign1, __ign2, __ign3, __ign4; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3), "=S" (__ign4) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "4" ((long)(a4)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+ long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \
+ asm volatile ( \
+ "call %[call]" \
+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
+ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
+ : "1" ((long)(a1)), "2" ((long)(a2)), \
+ "3" ((long)(a3)), "4" ((long)(a4)), \
+ "5" ((long)(a5)), \
+ [call] "m" (hypercall_page[__HYPERVISOR_##name]) \
+ : "memory" ); \
+ (type)__res; \
+})
+
+static inline int
+HYPERVISOR_set_trap_table(struct trap_info *table)
+{
+ return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(struct mmu_update *req, int count,
+ int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
+ int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+ return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+ return _hypercall2(int, stack_switch, ss, esp);
+}
+
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_selector,
+ unsigned long event_address,
+ unsigned long failsafe_selector,
+ unsigned long failsafe_address)
+{
+ return _hypercall4(int, set_callbacks,
+ event_selector, event_address,
+ failsafe_selector, failsafe_address);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(int set)
+{
+ return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(int cmd, unsigned long arg)
+{
+ return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+ return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(int reg)
+{
+ return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(u64 ma, u64 desc)
+{
+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(unsigned int cmd, void *arg)
+{
+ return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(void *call_list, int nr_calls)
+{
+ return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
+ unsigned long flags)
+{
+ unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+ pte_hi = new_val.pte_high;
+#endif
+ return _hypercall4(int, update_va_mapping, va,
+ new_val.pte_low, pte_hi, flags);
+}
+
+static inline int
+HYPERVISOR_event_channel_op(int cmd, void *arg)
+{
+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct evtchn_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, event_channel_op_compat, &op);
+ memcpy(arg, &op.u, sizeof(op.u));
+ }
+ return rc;
+}
+
+static inline int
+HYPERVISOR_xen_version(int cmd, void *arg)
+{
+ return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(int cmd, int count, char *str)
+{
+ return _hypercall3(int, console_io, cmd, count, str);
+}
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+ int rc = _hypercall2(int, physdev_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS)) {
+ struct physdev_op op;
+ op.cmd = cmd;
+ memcpy(&op.u, arg, sizeof(op.u));
+ rc = _hypercall1(int, physdev_op_compat, &op);
+ memcpy(arg, &op.u, sizeof(op.u));
+ }
+ return rc;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+ return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
+ unsigned long flags, domid_t domid)
+{
+ unsigned long pte_hi = 0;
+#ifdef CONFIG_X86_PAE
+ pte_hi = new_val.pte_high;
+#endif
+ return _hypercall5(int, update_va_mapping_otherdomain, va,
+ new_val.pte_low, pte_hi, flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
+{
+ return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+static inline int
+HYPERVISOR_suspend(unsigned long srec)
+{
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown,
+ SHUTDOWN_suspend, srec);
+}
+
+static inline int
+HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
+{
+ return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = va;
+#ifdef CONFIG_X86_PAE
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = new_val.pte_high;
+#else
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = 0;
+#endif
+ mcl->args[3] = flags;
+}
+
+static inline void
+MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
+ void *uop, unsigned int count)
+{
+ mcl->op = __HYPERVISOR_grant_table_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)uop;
+ mcl->args[2] = count;
+}
+
+static inline void
+MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags,
+ domid_t domid)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl->args[0] = va;
+#ifdef CONFIG_X86_PAE
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = new_val.pte_high;
+#else
+ mcl->args[1] = new_val.pte_low;
+ mcl->args[2] = 0;
+#endif
+ mcl->args[3] = flags;
+ mcl->args[4] = domid;
+}
+
+static inline void
+MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
+ struct desc_struct desc)
+{
+ mcl->op = __HYPERVISOR_update_descriptor;
+ mcl->args[0] = maddr;
+ mcl->args[1] = maddr >> 32;
+ mcl->args[2] = desc.a;
+ mcl->args[3] = desc.b;
+}
+
+static inline void
+MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
+{
+ mcl->op = __HYPERVISOR_memory_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)arg;
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+ int count, int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)req;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+ int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmuext_op;
+ mcl->args[0] = (unsigned long)op;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+}
+
+static inline void
+MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
+{
+ mcl->op = __HYPERVISOR_set_gdt;
+ mcl->args[0] = (unsigned long)frames;
+ mcl->args[1] = entries;
+}
+
+static inline void
+MULTI_stack_switch(struct multicall_entry *mcl,
+ unsigned long ss, unsigned long esp)
+{
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = ss;
+ mcl->args[1] = esp;
+}
+
+#endif /* __HYPERCALL_H__ */
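
For reference, applying _hypercall2 by hand to HYPERVISOR_sched_op(cmd, arg) yields roughly the following: the hypercall name indexes a 32-byte slot in hypercall_page, EAX carries the return value, and EBX/ECX carry the two arguments (illustrative expansion only; it needs this header's context to compile):

static inline int HYPERVISOR_sched_op_expanded(int cmd, unsigned long arg)
{
	long __res, __ign1, __ign2;

	/* Mirrors the _hypercall2(int, sched_op, cmd, arg) expansion above. */
	asm volatile ("call %[call]"
		      : "=a" (__res), "=b" (__ign1), "=c" (__ign2)
		      : "1" ((long)(cmd)), "2" ((long)(arg)),
			[call] "m" (hypercall_page[__HYPERVISOR_sched_op])
		      : "memory");
	return (int)__res;
}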
diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h
new file mode 100644
index 00000000000..8e15dd28c91
--- /dev/null
+++ b/include/asm-i386/xen/hypervisor.h
@@ -0,0 +1,73 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __HYPERVISOR_H__
+#define __HYPERVISOR_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/desc.h>
+#if defined(__i386__)
+# ifdef CONFIG_X86_PAE
+# include <asm-generic/pgtable-nopud.h>
+# else
+# include <asm-generic/pgtable-nopmd.h>
+# endif
+#endif
+#include <asm/xen/hypercall.h>
+
+/* arch/i386/kernel/setup.c */
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
+
+/* arch/i386/mach-xen/evtchn.c */
+/* Force a proper event-channel callback from Xen. */
+extern void force_evtchn_callback(void);
+
+/* Turn jiffies into Xen system time. */
+u64 jiffies_to_st(unsigned long jiffies);
+
+
+#define MULTI_UVMFLAGS_INDEX 3
+#define MULTI_UVMDOMID_INDEX 4
+
+#define is_running_on_xen() (xen_start_info ? 1 : 0)
+
+#endif /* __HYPERVISOR_H__ */
diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h
new file mode 100644
index 00000000000..165c3968e13
--- /dev/null
+++ b/include/asm-i386/xen/interface.h
@@ -0,0 +1,188 @@
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_32_H__
+#define __XEN_PUBLIC_ARCH_X86_32_H__
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+ __DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#ifndef __ASSEMBLY__
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+__DEFINE_GUEST_HANDLE(ulong, unsigned long);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(long);
+DEFINE_GUEST_HANDLE(void);
+#endif
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ */
+#define FIRST_RESERVED_GDT_PAGE 14
+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_SS FLAT_RING3_SS
+
+/* And the trap vector is... */
+#define TRAP_INSTR "int $0x82"
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#ifdef CONFIG_X86_PAE
+#define __HYPERVISOR_VIRT_START 0xF5800000
+#else
+#define __HYPERVISOR_VIRT_START 0xFC000000
+#endif
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#ifndef machine_to_phys_mapping
+#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
+#endif
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
+
+struct trap_info {
+ uint8_t vector; /* exception vector */
+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
+ uint16_t cs; /* code selector */
+ unsigned long address; /* code offset */
+};
+DEFINE_GUEST_HANDLE_STRUCT(trap_info);
+
+struct cpu_user_regs {
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t ebp;
+ uint32_t eax;
+ uint16_t error_code; /* private */
+ uint16_t entry_vector; /* private */
+ uint32_t eip;
+ uint16_t cs;
+ uint8_t saved_upcall_mask;
+ uint8_t _pad0;
+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
+ uint32_t esp;
+ uint16_t ss, _pad1;
+ uint16_t es, _pad2;
+ uint16_t ds, _pad3;
+ uint16_t fs, _pad4;
+ uint16_t gs, _pad5;
+};
+DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+struct vcpu_guest_context {
+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_HVM_GUEST (1<<1)
+#define VGCF_IN_KERNEL (1<<2)
+ unsigned long flags; /* VGCF_* flags */
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+};
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
+
+struct arch_shared_info {
+ unsigned long max_pfn; /* max pfn that appears in table */
+ /* Frame containing list of mfns containing list of mfns containing p2m. */
+ unsigned long pfn_to_mfn_frame_list_list;
+ unsigned long nmi_reason;
+};
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+#endif
+
+#endif
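Editorial note, not part of the patch: a hedged sketch of filling one trap_info entry for HYPERVISOR_set_trap_table() using the TI_SET_* helpers above. The vector, selector and handler values are illustrative, not taken from the patch.

/* Sketch only. */
static void example_fill_trap(struct trap_info *ti)
{
	ti->vector  = 14;		/* e.g. page fault */
	ti->cs      = FLAT_KERNEL_CS;	/* ring-1 flat code selector */
	ti->address = 0;		/* handler address would go here */
	ti->flags   = 0;
	TI_SET_DPL(ti, 1);		/* allow the ring-1 kernel to use it */
	TI_SET_IF(ti, 0);		/* keep event delivery enabled on entry */
}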
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index 1cc3f9cb6f4..cc6d8722825 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -308,6 +308,7 @@ COMPAT_SYS_SPU(move_pages)
SYSCALL_SPU(getcpu)
COMPAT_SYS(epoll_pwait)
COMPAT_SYS_SPU(utimensat)
+COMPAT_SYS(fallocate)
COMPAT_SYS_SPU(signalfd)
COMPAT_SYS_SPU(timerfd)
SYSCALL_SPU(eventfd)
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index f71c6061f1e..97d82b6a940 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -331,10 +331,11 @@
#define __NR_timerfd 306
#define __NR_eventfd 307
#define __NR_sync_file_range2 308
+#define __NR_fallocate 309
#ifdef __KERNEL__
-#define __NR_syscalls 309
+#define __NR_syscalls 310
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index ad595b67984..9565a892801 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -14,11 +14,6 @@
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
-extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
-#define virt_to_bus virt_to_bus_not_defined_use_pci_map
-extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
-#define bus_to_virt bus_to_virt_not_defined_use_pci_map
-
/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h
index e97c4313375..1acc7272e53 100644
--- a/include/asm-sparc64/mdesc.h
+++ b/include/asm-sparc64/mdesc.h
@@ -61,6 +61,16 @@ extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
extern void mdesc_update(void);
+struct mdesc_notifier_client {
+ void (*add)(struct mdesc_handle *handle, u64 node);
+ void (*remove)(struct mdesc_handle *handle, u64 node);
+
+ const char *node_name;
+ struct mdesc_notifier_client *next;
+};
+
+extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
+
extern void mdesc_fill_in_cpu_data(cpumask_t mask);
extern void sun4v_mdesc_init(void);
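Editorial note, not part of the patch: a hedged sketch of registering an mdesc notifier with the new client structure above. The node name and callback bodies are purely illustrative.

/* Sketch only. */
static void example_node_add(struct mdesc_handle *hp, u64 node) { }
static void example_node_remove(struct mdesc_handle *hp, u64 node) { }

static struct mdesc_notifier_client example_client = {
	.add       = example_node_add,
	.remove    = example_node_remove,
	.node_name = "virtual-device",	/* hypothetical MD node name */
};

/* ... in an init path: mdesc_register_notifier(&example_client); */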
diff --git a/include/asm-sparc64/vio.h b/include/asm-sparc64/vio.h
index 83c96422e9d..c0a8d4ed5bc 100644
--- a/include/asm-sparc64/vio.h
+++ b/include/asm-sparc64/vio.h
@@ -264,7 +264,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
((dr->prod - dr->cons) & (ring_size - 1)));
}
-#define VIO_MAX_TYPE_LEN 64
+#define VIO_MAX_TYPE_LEN 32
#define VIO_MAX_COMPAT_LEN 64
struct vio_dev {
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 8696f8ad401..fc4e73f5f1f 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -630,6 +630,8 @@ __SYSCALL(__NR_signalfd, sys_signalfd)
__SYSCALL(__NR_timerfd, sys_timerfd)
#define __NR_eventfd 284
__SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_fallocate 285
+__SYSCALL(__NR_fallocate, sys_fallocate)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 9a1e0674e56..e831759b2fb 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -38,17 +38,25 @@
* e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
* ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
*/
-#define ELFNOTE(name, type, desctype, descdata) \
-.pushsection .note.name, "",@note ; \
- .align 4 ; \
+#define ELFNOTE_START(name, type, flags) \
+.pushsection .note.name, flags,@note ; \
+ .balign 4 ; \
.long 2f - 1f /* namesz */ ; \
- .long 4f - 3f /* descsz */ ; \
+ .long 4484f - 3f /* descsz */ ; \
.long type ; \
1:.asciz #name ; \
-2:.align 4 ; \
-3:desctype descdata ; \
-4:.align 4 ; \
+2:.balign 4 ; \
+3:
+
+#define ELFNOTE_END \
+4484:.balign 4 ; \
.popsection ;
+
+#define ELFNOTE(name, type, desc) \
+ ELFNOTE_START(name, type, "") \
+ desc ; \
+ ELFNOTE_END
+
#else /* !__ASSEMBLER__ */
#include <linux/elf.h>
/*
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index de1f9f78625..cdee7aaa57a 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -71,7 +71,7 @@
/*
* Maximal count of links to a file
*/
-#define EXT4_LINK_MAX 32000
+#define EXT4_LINK_MAX 65000
/*
* Macro-instructions used to manage several block sizes
@@ -102,6 +102,7 @@
EXT4_GOOD_OLD_FIRST_INO : \
(s)->s_first_ino)
#endif
+#define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1 << (blkbits)))
/*
* Macro-instructions used to manage fragments
@@ -201,6 +202,7 @@ struct ext4_group_desc
#define EXT4_STATE_JDATA 0x00000001 /* journaled data exists */
#define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
#define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
+#define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
/* Used to pass group descriptor data when online resize is done */
struct ext4_new_group_input {
@@ -225,6 +227,11 @@ struct ext4_new_group_data {
__u32 free_blocks_count;
};
+/*
+ * The following is used by the preallocation code to tell get_blocks() that
+ * we want uninitialized extents.
+ */
+#define EXT4_CREATE_UNINITIALIZED_EXT 2
/*
* ioctl commands
@@ -237,7 +244,7 @@ struct ext4_new_group_data {
#define EXT4_IOC_GROUP_ADD _IOW('f', 8,struct ext4_new_group_input)
#define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION
#define EXT4_IOC_SETVERSION_OLD FS_IOC_SETVERSION
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
#define EXT4_IOC_WAIT_FOR_READONLY _IOR('f', 99, long)
#endif
#define EXT4_IOC_GETRSVSZ _IOR('f', 5, long)
@@ -253,7 +260,7 @@ struct ext4_new_group_data {
#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
#define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
#endif
#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
@@ -282,7 +289,7 @@ struct ext4_inode {
__le16 i_uid; /* Low 16 bits of Owner Uid */
__le32 i_size; /* Size in bytes */
__le32 i_atime; /* Access time */
- __le32 i_ctime; /* Creation time */
+ __le32 i_ctime; /* Inode Change time */
__le32 i_mtime; /* Modification time */
__le32 i_dtime; /* Deletion Time */
__le16 i_gid; /* Low 16 bits of Group Id */
@@ -331,10 +338,85 @@ struct ext4_inode {
} osd2; /* OS dependent 2 */
__le16 i_extra_isize;
__le16 i_pad1;
+ __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
+ __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
+ __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
+ __le32 i_crtime; /* File Creation time */
+ __le32 i_crtime_extra; /* extra FileCreationtime (nsec << 2 | epoch) */
};
#define i_size_high i_dir_acl
+#define EXT4_EPOCH_BITS 2
+#define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1)
+#define EXT4_NSEC_MASK (~0UL << EXT4_EPOCH_BITS)
+
+/*
+ * Extended fields will fit into an inode if the filesystem was formatted
+ * with large inodes (-I 256 or larger) and there are not currently any EAs
+ * consuming all of the available space. For new inodes we always reserve
+ * enough space for the kernel's known extended fields, but for inodes
+ * created with an old kernel this might not have been the case. None of
+ * the extended inode fields is critical for correct filesystem operation.
+ * This macro checks if a certain field fits in the inode. Note that
+ * inode-size = GOOD_OLD_INODE_SIZE + i_extra_isize
+ */
+#define EXT4_FITS_IN_INODE(ext4_inode, einode, field) \
+ ((offsetof(typeof(*ext4_inode), field) + \
+ sizeof((ext4_inode)->field)) \
+ <= (EXT4_GOOD_OLD_INODE_SIZE + \
+ (einode)->i_extra_isize)) \
+
+static inline __le32 ext4_encode_extra_time(struct timespec *time)
+{
+ return cpu_to_le32((sizeof(time->tv_sec) > 4 ?
+ time->tv_sec >> 32 : 0) |
+ ((time->tv_nsec << 2) & EXT4_NSEC_MASK));
+}
+
+static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
+{
+ if (sizeof(time->tv_sec) > 4)
+ time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK)
+ << 32;
+ time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> 2;
+}
+
+#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
+do { \
+ (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
+ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \
+ (raw_inode)->xtime ## _extra = \
+ ext4_encode_extra_time(&(inode)->xtime); \
+} while (0)
+
+#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
+do { \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
+ (raw_inode)->xtime ## _extra = \
+ ext4_encode_extra_time(&(einode)->xtime); \
+} while (0)
+
+#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \
+do { \
+ (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \
+ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \
+ ext4_decode_extra_time(&(inode)->xtime, \
+ raw_inode->xtime ## _extra); \
+} while (0)
+
+#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
+do { \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ (einode)->xtime.tv_sec = \
+ (signed)le32_to_cpu((raw_inode)->xtime); \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
+ ext4_decode_extra_time(&(einode)->xtime, \
+ raw_inode->xtime ## _extra); \
+} while (0)
+
#if defined(__KERNEL__) || defined(__linux__)
#define i_reserved1 osd1.linux1.l_i_reserved1
#define i_frag osd2.linux2.l_i_frag
@@ -533,6 +615,13 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
return container_of(inode, struct ext4_inode_info, vfs_inode);
}
+static inline struct timespec ext4_current_time(struct inode *inode)
+{
+ return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
+ current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
+}
+
+
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
@@ -603,6 +692,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
#define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
+#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
+#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
@@ -620,6 +711,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
EXT4_FEATURE_INCOMPAT_64BIT)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+ EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
+ EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
/*
@@ -862,6 +955,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
extern void ext4_truncate (struct inode *);
extern void ext4_set_inode_flags(struct inode *);
+extern void ext4_get_inode_flags(struct ext4_inode_info *);
extern void ext4_set_aops(struct inode *inode);
extern int ext4_writepage_trans_blocks(struct inode *);
extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
@@ -983,6 +1077,8 @@ extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
extern void ext4_ext_truncate(struct inode *, struct page *);
extern void ext4_ext_init(struct super_block *);
extern void ext4_ext_release(struct super_block *);
+extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
+ loff_t len);
static inline int
ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
unsigned long max_blocks, struct buffer_head *bh,
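Editorial note, not part of the patch: a hedged sketch of the extra-time packing performed by ext4_encode_extra_time() above (epoch bits in bits 0-1, nanoseconds shifted into the rest). It mirrors the upstream helper and assumes the usual kernel types; the function name is hypothetical.

/* Sketch only. */
static __le32 example_pack_xtime(const struct timespec *ts)
{
	u32 extra = 0;

	if (sizeof(ts->tv_sec) > 4)			/* 64-bit time_t only */
		extra = (ts->tv_sec >> 32) & EXT4_EPOCH_MASK;
	extra |= (ts->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK;
	return cpu_to_le32(extra);
}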
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
index acfe59740b0..81406f3655d 100644
--- a/include/linux/ext4_fs_extents.h
+++ b/include/linux/ext4_fs_extents.h
@@ -141,7 +141,25 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
#define EXT_MAX_BLOCK 0xffffffff
-#define EXT_MAX_LEN ((1UL << 15) - 1)
+/*
+ * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
+ * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
+ * MSB of the ee_len field in the extent data structure to signify whether
+ * this particular extent is an initialized extent or an uninitialized
+ * (i.e. preallocated) one.
+ * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
+ * uninitialized extent.
+ * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
+ * uninitialized one. In other words, if the MSB of ee_len is set, it is an
+ * uninitialized extent, with one special case: ee_len = 0x8000. Since we
+ * cannot have an uninitialized extent of zero length, an ee_len of 0x8000 is
+ * treated as an initialized extent of length 0x8000. This way we get better
+ * extent-to-group alignment for initialized extents.
+ * Hence, the maximum number of blocks we can have in an *initialized*
+ * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
+ */
+#define EXT_INIT_MAX_LEN (1UL << 15)
+#define EXT_UNINIT_MAX_LEN (EXT_INIT_MAX_LEN - 1)
#define EXT_FIRST_EXTENT(__hdr__) \
@@ -188,8 +206,31 @@ ext4_ext_invalidate_cache(struct inode *inode)
EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
}
+static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
+{
+ /* We can not have an uninitialized extent of zero length! */
+ BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0);
+ ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN);
+}
+
+static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext)
+{
+ /* Extent with ee_len of 0x8000 is treated as an initialized extent */
+ return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
+}
+
+static inline int ext4_ext_get_actual_len(struct ext4_extent *ext)
+{
+ return (le16_to_cpu(ext->ee_len) <= EXT_INIT_MAX_LEN ?
+ le16_to_cpu(ext->ee_len) :
+ (le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN));
+}
+
extern int ext4_extent_tree_init(handle_t *, struct inode *);
extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
+extern int ext4_ext_try_to_merge(struct inode *inode,
+ struct ext4_ext_path *path,
+ struct ext4_extent *);
extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
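Editorial note, not part of the patch: a hedged sketch of how the ee_len MSB encoding above behaves for a 100-block preallocated extent. The ext4_extent instance and function name are illustrative.

/* Sketch only. */
static void example_uninit_extent(struct ext4_extent *ex)
{
	ex->ee_len = cpu_to_le16(100);		/* 100-block extent */
	ext4_ext_mark_uninitialized(ex);	/* ee_len becomes 0x8000 + 100 */
	/* ext4_ext_is_uninitialized(ex) == 1,
	 * ext4_ext_get_actual_len(ex) == 100 */
}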
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index 9de49440699..1a511e9905a 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -153,6 +153,11 @@ struct ext4_inode_info {
unsigned long i_ext_generation;
struct ext4_ext_cache i_cached_extent;
+ /*
+	 * File creation time. Its function is the same as that of
+ * struct timespec i_{a,c,m}time in the generic inode.
+ */
+ struct timespec i_crtime;
};
#endif /* _LINUX_EXT4_FS_I */
diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h
index 2347557a327..1b2ffee12be 100644
--- a/include/linux/ext4_fs_sb.h
+++ b/include/linux/ext4_fs_sb.h
@@ -73,7 +73,7 @@ struct ext4_sb_info {
struct list_head s_orphan;
unsigned long s_commit_interval;
struct block_device *journal_bdev;
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
struct timer_list turn_ro_timer; /* For turning read-only (crash simulation) */
wait_queue_head_t ro_wait_queue; /* For people waiting for the fs to go read-only */
#endif
@@ -81,6 +81,7 @@ struct ext4_sb_info {
char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
int s_jquota_fmt; /* Format of quota to use */
#endif
+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
#ifdef EXTENTS_STATS
/* ext4 extents stats */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
new file mode 100644
index 00000000000..8e912ab6a07
--- /dev/null
+++ b/include/linux/falloc.h
@@ -0,0 +1,6 @@
+#ifndef _FALLOC_H_
+#define _FALLOC_H_
+
+#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */
+
+#endif /* _FALLOC_H_ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 98205f68047..0b806c5e32e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1147,6 +1147,8 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
+ long (*fallocate)(struct inode *inode, int mode, loff_t offset,
+ loff_t len);
};
struct seq_file;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0e0fedd2039..260d6d76c5f 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -50,14 +50,14 @@
*/
#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-#ifdef CONFIG_JBD_DEBUG
+#ifdef CONFIG_JBD2_DEBUG
/*
* Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
* consistency checks. By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
+ * CONFIG_JBD2_DEBUG is on.
*/
#define JBD_EXPENSIVE_CHECKING
-extern int jbd2_journal_enable_debug;
+extern u8 jbd2_journal_enable_debug;
#define jbd_debug(n, f, a...) \
do { \
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 10f505c8431..5dc13848891 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -36,13 +36,57 @@ static inline int request_module(const char * name, ...) { return -ENOSYS; }
#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
struct key;
-extern int call_usermodehelper_keys(char *path, char *argv[], char *envp[],
- struct key *session_keyring, int wait);
+struct file;
+struct subprocess_info;
+
+/* Allocate a subprocess_info structure */
+struct subprocess_info *call_usermodehelper_setup(char *path,
+ char **argv, char **envp);
+
+/* Set various pieces of state into the subprocess_info structure */
+void call_usermodehelper_setkeys(struct subprocess_info *info,
+ struct key *session_keyring);
+int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+ struct file **filp);
+void call_usermodehelper_setcleanup(struct subprocess_info *info,
+ void (*cleanup)(char **argv, char **envp));
+
+enum umh_wait {
+ UMH_NO_WAIT = -1, /* don't wait at all */
+ UMH_WAIT_EXEC = 0, /* wait for the exec, but not the process */
+ UMH_WAIT_PROC = 1, /* wait for the process to complete */
+};
+
+/* Actually execute the sub-process */
+int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
+
+/* Free the subprocess_info. This is only needed if you're not going
+ to call call_usermodehelper_exec */
+void call_usermodehelper_freeinfo(struct subprocess_info *info);
static inline int
-call_usermodehelper(char *path, char **argv, char **envp, int wait)
+call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
{
- return call_usermodehelper_keys(path, argv, envp, NULL, wait);
+ struct subprocess_info *info;
+
+ info = call_usermodehelper_setup(path, argv, envp);
+ if (info == NULL)
+ return -ENOMEM;
+ return call_usermodehelper_exec(info, wait);
+}
+
+static inline int
+call_usermodehelper_keys(char *path, char **argv, char **envp,
+ struct key *session_keyring, enum umh_wait wait)
+{
+ struct subprocess_info *info;
+
+ info = call_usermodehelper_setup(path, argv, envp);
+ if (info == NULL)
+ return -ENOMEM;
+
+ call_usermodehelper_setkeys(info, session_keyring);
+ return call_usermodehelper_exec(info, wait);
}
extern void usermodehelper_init(void);
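Editorial note, not part of the patch: a hedged sketch of the split setup/exec usermode-helper API declared above, following the same pattern as the inline wrappers. The helper path, arguments and environment are illustrative.

/* Sketch only. */
static int example_run_helper(void)
{
	char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	struct subprocess_info *info;

	info = call_usermodehelper_setup(argv[0], argv, envp);
	if (info == NULL)
		return -ENOMEM;
	/* optional: call_usermodehelper_setkeys()/_stdinpipe()/_setcleanup() */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}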
diff --git a/include/linux/major.h b/include/linux/major.h
index 7e7c9093919..0cb98053537 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -158,6 +158,8 @@
#define VXSPEC_MAJOR 200 /* VERITAS volume config driver */
#define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */
+#define XENVBD_MAJOR 202 /* Xen virtual block device */
+
#define MSR_MAJOR 202
#define CPUID_MAJOR 203
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index da7a13c97eb..9820ca1e45e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1098,10 +1098,8 @@ extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all
extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_discard(struct net_device *dev);
extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern void __dev_addr_discard(struct dev_addr_list **list);
extern void dev_set_promiscuity(struct net_device *dev, int inc);
extern void dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h
index 34ab0fb736e..a92fefc3c7e 100644
--- a/include/linux/netfilter_ipv4/ipt_iprange.h
+++ b/include/linux/netfilter_ipv4/ipt_iprange.h
@@ -1,6 +1,8 @@
#ifndef _IPT_IPRANGE_H
#define _IPT_IPRANGE_H
+#include <linux/types.h>
+
#define IPRANGE_SRC 0x01 /* Match source IP address */
#define IPRANGE_DST 0x02 /* Match destination IP address */
#define IPRANGE_SRC_INV 0x10 /* Negate the condition */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index ae2d79f2107..731cd2ac322 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -92,6 +92,7 @@
/* PG_owner_priv_1 users should have descriptive aliases */
#define PG_checked PG_owner_priv_1 /* Used by some filesystems */
+#define PG_pinned PG_owner_priv_1 /* Xen pinned pagetable */
#if (BITS_PER_LONG > 32)
/*
@@ -170,6 +171,10 @@ static inline void SetPageUptodate(struct page *page)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PagePinned(page) test_bit(PG_pinned, &(page)->flags)
+#define SetPagePinned(page) set_bit(PG_pinned, &(page)->flags)
+#define ClearPagePinned(page) clear_bit(PG_pinned, &(page)->flags)
+
#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 1dd1c707311..85ea63f462a 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -67,6 +67,11 @@ extern void kernel_power_off(void);
void ctrl_alt_del(void);
+#define POWEROFF_CMD_PATH_LEN 256
+extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
+
+extern int orderly_poweroff(bool force);
+
/*
* Emergency restart, callable from an interrupt handler.
*/
diff --git a/include/linux/string.h b/include/linux/string.h
index 7f2eb6a477f..836062b7582 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -105,8 +105,12 @@ extern void * memchr(const void *,int,__kernel_size_t);
#endif
extern char *kstrdup(const char *s, gfp_t gfp);
+extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+extern void argv_free(char **argv);
+
#ifdef __cplusplus
}
#endif
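Editorial note, not part of the patch: a hedged sketch of the argv_split()/argv_free() helpers declared above; the function name is illustrative.

/* Sketch only. */
static int example_split(const char *cmdline)
{
	int argc;
	char **argv = argv_split(GFP_KERNEL, cmdline, &argc);

	if (argv == NULL)
		return -ENOMEM;
	/* ... use argc/argv, e.g. hand them to call_usermodehelper() ... */
	argv_free(argv);
	return 0;
}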
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 83d0ec11235..7a8b1e3322e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -610,6 +610,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
asmlinkage long sys_timerfd(int ufd, int clockid, int flags,
const struct itimerspec __user *utmr);
asmlinkage long sys_eventfd(unsigned int count);
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 132b260aef1..c2b10cae5da 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -70,6 +70,10 @@ extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+/* Allocate/destroy a 'vmalloc' VM area. */
+extern struct vm_struct *alloc_vm_area(size_t size);
+extern void free_vm_area(struct vm_struct *area);
+
/*
* Internals. Dont't use..
*/
diff --git a/include/mtd/ubi-header.h b/include/mtd/ubi-header.h
index fa479c71aa3..74efa776347 100644
--- a/include/mtd/ubi-header.h
+++ b/include/mtd/ubi-header.h
@@ -74,42 +74,13 @@ enum {
UBI_COMPAT_REJECT = 5
};
-/*
- * ubi16_t/ubi32_t/ubi64_t - 16, 32, and 64-bit integers used in UBI on-flash
- * data structures.
- */
-typedef struct {
- uint16_t int16;
-} __attribute__ ((packed)) ubi16_t;
-
-typedef struct {
- uint32_t int32;
-} __attribute__ ((packed)) ubi32_t;
-
-typedef struct {
- uint64_t int64;
-} __attribute__ ((packed)) ubi64_t;
-
-/*
- * In this implementation of UBI uses the big-endian format for on-flash
- * integers. The below are the corresponding conversion macros.
- */
-#define cpu_to_ubi16(x) ((ubi16_t){__cpu_to_be16(x)})
-#define ubi16_to_cpu(x) ((uint16_t)__be16_to_cpu((x).int16))
-
-#define cpu_to_ubi32(x) ((ubi32_t){__cpu_to_be32(x)})
-#define ubi32_to_cpu(x) ((uint32_t)__be32_to_cpu((x).int32))
-
-#define cpu_to_ubi64(x) ((ubi64_t){__cpu_to_be64(x)})
-#define ubi64_to_cpu(x) ((uint64_t)__be64_to_cpu((x).int64))
-
/* Sizes of UBI headers */
#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
/* Sizes of UBI headers without the ending CRC */
-#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(ubi32_t))
-#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(ubi32_t))
+#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
/**
* struct ubi_ec_hdr - UBI erase counter header.
@@ -137,14 +108,14 @@ typedef struct {
* eraseblocks.
*/
struct ubi_ec_hdr {
- ubi32_t magic;
- uint8_t version;
- uint8_t padding1[3];
- ubi64_t ec; /* Warning: the current limit is 31-bit anyway! */
- ubi32_t vid_hdr_offset;
- ubi32_t data_offset;
- uint8_t padding2[36];
- ubi32_t hdr_crc;
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be64 ec; /* Warning: the current limit is 31-bit anyway! */
+ __be32 vid_hdr_offset;
+ __be32 data_offset;
+ __u8 padding2[36];
+ __be32 hdr_crc;
} __attribute__ ((packed));
/**
@@ -262,22 +233,22 @@ struct ubi_ec_hdr {
* software (say, cramfs) on top of the UBI volume.
*/
struct ubi_vid_hdr {
- ubi32_t magic;
- uint8_t version;
- uint8_t vol_type;
- uint8_t copy_flag;
- uint8_t compat;
- ubi32_t vol_id;
- ubi32_t lnum;
- ubi32_t leb_ver; /* obsolete, to be removed, don't use */
- ubi32_t data_size;
- ubi32_t used_ebs;
- ubi32_t data_pad;
- ubi32_t data_crc;
- uint8_t padding1[4];
- ubi64_t sqnum;
- uint8_t padding2[12];
- ubi32_t hdr_crc;
+ __be32 magic;
+ __u8 version;
+ __u8 vol_type;
+ __u8 copy_flag;
+ __u8 compat;
+ __be32 vol_id;
+ __be32 lnum;
+ __be32 leb_ver; /* obsolete, to be removed, don't use */
+ __be32 data_size;
+ __be32 used_ebs;
+ __be32 data_pad;
+ __be32 data_crc;
+ __u8 padding1[4];
+ __be64 sqnum;
+ __u8 padding2[12];
+ __be32 hdr_crc;
} __attribute__ ((packed));
/* Internal UBI volumes count */
@@ -306,7 +277,7 @@ struct ubi_vid_hdr {
#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
/* Size of the volume table record without the ending CRC */
-#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(ubi32_t))
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
/**
* struct ubi_vtbl_record - a record in the volume table.
@@ -346,15 +317,15 @@ struct ubi_vid_hdr {
* Empty records contain all zeroes and the CRC checksum of those zeroes.
*/
struct ubi_vtbl_record {
- ubi32_t reserved_pebs;
- ubi32_t alignment;
- ubi32_t data_pad;
- uint8_t vol_type;
- uint8_t upd_marker;
- ubi16_t name_len;
- uint8_t name[UBI_VOL_NAME_MAX+1];
- uint8_t padding2[24];
- ubi32_t crc;
+ __be32 reserved_pebs;
+ __be32 alignment;
+ __be32 data_pad;
+ __u8 vol_type;
+ __u8 upd_marker;
+ __be16 name_len;
+ __u8 name[UBI_VOL_NAME_MAX+1];
+ __u8 padding2[24];
+ __be32 crc;
} __attribute__ ((packed));
#endif /* !__UBI_HEADER_H__ */
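Editorial note, not part of the patch: with the private ubi16/32/64_t wrappers gone, on-flash fields are filled with the standard byteswap helpers. A hedged sketch follows; the field values are illustrative.

/* Sketch only. */
static void example_fill_ec_hdr(struct ubi_ec_hdr *hdr, u64 ec)
{
	hdr->version        = 1;			/* illustrative */
	hdr->ec             = cpu_to_be64(ec);
	hdr->vid_hdr_offset = cpu_to_be32(64);		/* illustrative offset */
	hdr->data_offset    = cpu_to_be32(128);		/* illustrative offset */
}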
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a8af9ae0017..8b404b1ef7c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -652,8 +652,7 @@ struct tcp_congestion_ops {
/* lower bound for congestion window (optional) */
u32 (*min_cwnd)(const struct sock *sk);
/* do new cwnd calculation (required) */
- void (*cong_avoid)(struct sock *sk, u32 ack,
- u32 rtt, u32 in_flight, int good_ack);
+ void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack);
/* call before changing ca_state (optional) */
void (*set_state)(struct sock *sk, u8 new_state);
/* call when cwnd event occurs (optional) */
@@ -684,8 +683,7 @@ extern void tcp_slow_start(struct tcp_sock *tp);
extern struct tcp_congestion_ops tcp_init_congestion_ops;
extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
- u32 rtt, u32 in_flight, int flag);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag);
extern u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae959e95017..a5f80bfbaaa 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -585,7 +585,6 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct
struct xfrm_dst
{
union {
- struct xfrm_dst *next;
struct dst_entry dst;
struct rtable rt;
struct rt6_info rt6;
diff --git a/include/xen/events.h b/include/xen/events.h
new file mode 100644
index 00000000000..2bde54d29be
--- /dev/null
+++ b/include/xen/events.h
@@ -0,0 +1,48 @@
+#ifndef _XEN_EVENTS_H
+#define _XEN_EVENTS_H
+
+#include <linux/interrupt.h>
+
+#include <xen/interface/event_channel.h>
+#include <asm/xen/hypercall.h>
+
+enum ipi_vector {
+ XEN_RESCHEDULE_VECTOR,
+ XEN_CALL_FUNCTION_VECTOR,
+
+ XEN_NR_IPIS,
+};
+
+int bind_evtchn_to_irq(unsigned int evtchn);
+int bind_evtchn_to_irqhandler(unsigned int evtchn,
+ irq_handler_t handler,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
+int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
+ irq_handler_t handler,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
+int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+ unsigned int cpu,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname,
+ void *dev_id);
+
+/*
+ * Common unbind function for all event sources. Takes IRQ to unbind from.
+ * Automatically closes the underlying event channel (even for bindings
+ * made with bind_evtchn_to_irqhandler()).
+ */
+void unbind_from_irqhandler(unsigned int irq, void *dev_id);
+
+void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
+
+static inline void notify_remote_via_evtchn(int port)
+{
+ struct evtchn_send send = { .port = port };
+ (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
+}
+
+extern void notify_remote_via_irq(int irq);
+#endif /* _XEN_EVENTS_H */
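Editorial note, not part of the patch: a hedged sketch of binding a VIRQ with the API declared above. The handler, device name and error handling are illustrative.

/* Sketch only. */
static irqreturn_t example_virq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_bind(unsigned int virq, unsigned int cpu)
{
	int irq = bind_virq_to_irqhandler(virq, cpu, example_virq_handler,
					  0, "example", NULL);
	if (irq < 0)
		return irq;
	/* ... later: unbind_from_irqhandler(irq, NULL); */
	return 0;
}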
diff --git a/include/xen/features.h b/include/xen/features.h
new file mode 100644
index 00000000000..27292d4d2a6
--- /dev/null
+++ b/include/xen/features.h
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * features.h
+ *
+ * Query the features reported by Xen.
+ *
+ * Copyright (c) 2006, Ian Campbell
+ */
+
+#ifndef __XEN_FEATURES_H__
+#define __XEN_FEATURES_H__
+
+#include <xen/interface/features.h>
+
+void xen_setup_features(void);
+
+extern u8 xen_features[XENFEAT_NR_SUBMAPS * 32];
+
+static inline int xen_feature(int flag)
+{
+ return xen_features[flag];
+}
+
+#endif /* __XEN_FEATURES_H__ */
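Editorial note, not part of the patch: a hedged sketch of querying a feature bit once xen_setup_features() has populated the map; the wrapper name is illustrative.

/* Sketch only. */
static inline int example_uses_writable_pagetables(void)
{
	return xen_feature(XENFEAT_writable_page_tables);
}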
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
new file mode 100644
index 00000000000..761c83498e0
--- /dev/null
+++ b/include/xen/grant_table.h
@@ -0,0 +1,107 @@
+/******************************************************************************
+ * grant_table.h
+ *
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ *
+ * Copyright (c) 2004-2005, K A Fraser
+ * Copyright (c) 2005, Christopher Clark
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __ASM_GNTTAB_H__
+#define __ASM_GNTTAB_H__
+
+#include <asm/xen/hypervisor.h>
+#include <xen/interface/grant_table.h>
+
+/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
+#define NR_GRANT_FRAMES 4
+
+struct gnttab_free_callback {
+ struct gnttab_free_callback *next;
+ void (*fn)(void *);
+ void *arg;
+ u16 count;
+};
+
+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
+ int readonly);
+
+/*
+ * End access through the given grant reference, iff the grant entry is no
+ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
+ * use.
+ */
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+
+/*
+ * Eventually end access through the given grant reference, and once that
+ * access has been ended, free the given page too. Access will be ended
+ * immediately iff the grant entry is not in use, otherwise it will happen
+ * some time later. page may be 0, in which case no freeing will occur.
+ */
+void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
+ unsigned long page);
+
+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
+
+int gnttab_query_foreign_access(grant_ref_t ref);
+
+/*
+ * operations on reserved batches of grant references
+ */
+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
+
+void gnttab_free_grant_reference(grant_ref_t ref);
+
+void gnttab_free_grant_references(grant_ref_t head);
+
+int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
+
+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
+
+void gnttab_release_grant_reference(grant_ref_t *private_head,
+ grant_ref_t release);
+
+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+ void (*fn)(void *), void *arg, u16 count);
+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
+
+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int readonly);
+
+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
+ unsigned long pfn);
+
+#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
+
+#endif /* __ASM_GNTTAB_H__ */
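Editorial note, not part of the patch: a hedged sketch of granting a single frame to another domain and tearing the grant down again, using the API above. How the grant reference is communicated to the other end is outside the sketch.

/* Sketch only. */
static int example_share_frame(domid_t otherend, unsigned long frame)
{
	int ref = gnttab_grant_foreign_access(otherend, frame, 0 /* writable */);

	if (ref < 0)
		return ref;
	/* ... hand 'ref' to the other end, e.g. via xenstore ... */
	gnttab_end_foreign_access(ref, 0, 0 /* no page to free */);
	return 0;
}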
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h
new file mode 100644
index 00000000000..21c0ecfd786
--- /dev/null
+++ b/include/xen/hvc-console.h
@@ -0,0 +1,6 @@
+#ifndef XEN_HVC_CONSOLE_H
+#define XEN_HVC_CONSOLE_H
+
+extern struct console xenboot_console;
+
+#endif /* XEN_HVC_CONSOLE_H */
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
new file mode 100644
index 00000000000..a64d3df5bd9
--- /dev/null
+++ b/include/xen/interface/elfnote.h
@@ -0,0 +1,133 @@
+/******************************************************************************
+ * elfnote.h
+ *
+ * Definitions used for the Xen ELF notes.
+ *
+ * Copyright (c) 2006, Ian Campbell, XenSource Ltd.
+ */
+
+#ifndef __XEN_PUBLIC_ELFNOTE_H__
+#define __XEN_PUBLIC_ELFNOTE_H__
+
+/*
+ * The notes should live in a SHT_NOTE segment and have "Xen" in the
+ * name field.
+ *
+ * Numeric types are either 4 or 8 bytes depending on the content of
+ * the desc field.
+ *
+ * LEGACY indicates the fields in the legacy __xen_guest string which
+ * this note type replaces.
+ */
+
+/*
+ * NAME=VALUE pair (string).
+ *
+ * LEGACY: FEATURES and PAE
+ */
+#define XEN_ELFNOTE_INFO 0
+
+/*
+ * The virtual address of the entry point (numeric).
+ *
+ * LEGACY: VIRT_ENTRY
+ */
+#define XEN_ELFNOTE_ENTRY 1
+
+/* The virtual address of the hypercall transfer page (numeric).
+ *
+ * LEGACY: HYPERCALL_PAGE. (n.b. legacy value is a physical page
+ * number not a virtual address)
+ */
+#define XEN_ELFNOTE_HYPERCALL_PAGE 2
+
+/* The virtual address where the kernel image should be mapped (numeric).
+ *
+ * Defaults to 0.
+ *
+ * LEGACY: VIRT_BASE
+ */
+#define XEN_ELFNOTE_VIRT_BASE 3
+
+/*
+ * The offset of the ELF paddr field from the actual required
+ * pseudo-physical address (numeric).
+ *
+ * This is used to maintain backwards compatibility with older kernels
+ * which wrote __PAGE_OFFSET into that field. This field defaults to 0
+ * if not present.
+ *
+ * LEGACY: ELF_PADDR_OFFSET. (n.b. legacy default is VIRT_BASE)
+ */
+#define XEN_ELFNOTE_PADDR_OFFSET 4
+
+/*
+ * The version of Xen that we work with (string).
+ *
+ * LEGACY: XEN_VER
+ */
+#define XEN_ELFNOTE_XEN_VERSION 5
+
+/*
+ * The name of the guest operating system (string).
+ *
+ * LEGACY: GUEST_OS
+ */
+#define XEN_ELFNOTE_GUEST_OS 6
+
+/*
+ * The version of the guest operating system (string).
+ *
+ * LEGACY: GUEST_VER
+ */
+#define XEN_ELFNOTE_GUEST_VERSION 7
+
+/*
+ * The loader type (string).
+ *
+ * LEGACY: LOADER
+ */
+#define XEN_ELFNOTE_LOADER 8
+
+/*
+ * The kernel supports PAE (x86/32 only, string = "yes" or "no").
+ *
+ * LEGACY: PAE (n.b. The legacy interface included a provision to
+ * indicate 'extended-cr3' support allowing L3 page tables to be
+ * placed above 4G. It is assumed that any kernel new enough to use
+ * these ELF notes will include this and therefore "yes" here is
+ * equivalent to "yes[entended-cr3]" in the __xen_guest interface.
+ */
+#define XEN_ELFNOTE_PAE_MODE 9
+
+/*
+ * The features supported/required by this kernel (string).
+ *
+ * The string must consist of a list of feature names (as given in
+ * features.h, without the "XENFEAT_" prefix) separated by '|'
+ * characters. If a feature is required for the kernel to function
+ * then the feature name must be preceded by a '!' character.
+ *
+ * LEGACY: FEATURES
+ */
+#define XEN_ELFNOTE_FEATURES 10
+
+/*
+ * The kernel requires the symbol table to be loaded (string = "yes" or "no")
+ * LEGACY: BSD_SYMTAB (n.b. The legacy interface treated the presence or
+ * absence of this string as a boolean flag rather than requiring "yes" or
+ * "no".)
+ */
+#define XEN_ELFNOTE_BSD_SYMTAB 11
+
+#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/include/xen/interface/event_channel.h b/include/xen/interface/event_channel.h
new file mode 100644
index 00000000000..919b5bdcb2b
--- /dev/null
+++ b/include/xen/interface/event_channel.h
@@ -0,0 +1,195 @@
+/******************************************************************************
+ * event_channel.h
+ *
+ * Event channels between domains.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __XEN_PUBLIC_EVENT_CHANNEL_H__
+#define __XEN_PUBLIC_EVENT_CHANNEL_H__
+
+typedef uint32_t evtchn_port_t;
+DEFINE_GUEST_HANDLE(evtchn_port_t);
+
+/*
+ * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
+ * accepting interdomain bindings from domain <remote_dom>. A fresh port
+ * is allocated in <dom> and returned as <port>.
+ * NOTES:
+ * 1. If the caller is unprivileged then <dom> must be DOMID_SELF.
+ * 2. <rdom> may be DOMID_SELF, allowing loopback connections.
+ */
+#define EVTCHNOP_alloc_unbound 6
+struct evtchn_alloc_unbound {
+ /* IN parameters */
+ domid_t dom, remote_dom;
+ /* OUT parameters */
+ evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_bind_interdomain: Construct an interdomain event channel between
+ * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
+ * a port that is unbound and marked as accepting bindings from the calling
+ * domain. A fresh port is allocated in the calling domain and returned as
+ * <local_port>.
+ * NOTES:
+ * 2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
+ */
+#define EVTCHNOP_bind_interdomain 0
+struct evtchn_bind_interdomain {
+ /* IN parameters. */
+ domid_t remote_dom;
+ evtchn_port_t remote_port;
+ /* OUT parameters. */
+ evtchn_port_t local_port;
+};
+
+/*
+ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified
+ * vcpu.
+ * NOTES:
+ * 1. A virtual IRQ may be bound to at most one event channel per vcpu.
+ * 2. The allocated event channel is bound to the specified vcpu. The binding
+ * may not be changed.
+ */
+#define EVTCHNOP_bind_virq 1
+struct evtchn_bind_virq {
+ /* IN parameters. */
+ uint32_t virq;
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_bind_pirq: Bind a local event channel to PIRQ <irq>.
+ * NOTES:
+ * 1. A physical IRQ may be bound to at most one event channel per domain.
+ * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
+ */
+#define EVTCHNOP_bind_pirq 2
+struct evtchn_bind_pirq {
+ /* IN parameters. */
+ uint32_t pirq;
+#define BIND_PIRQ__WILL_SHARE 1
+ uint32_t flags; /* BIND_PIRQ__* */
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_bind_ipi: Bind a local event channel to receive events.
+ * NOTES:
+ * 1. The allocated event channel is bound to the specified vcpu. The binding
+ * may not be changed.
+ */
+#define EVTCHNOP_bind_ipi 7
+struct evtchn_bind_ipi {
+ uint32_t vcpu;
+ /* OUT parameters. */
+ evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_close: Close a local event channel <port>. If the channel is
+ * interdomain then the remote end is placed in the unbound state
+ * (EVTCHNSTAT_unbound), awaiting a new connection.
+ */
+#define EVTCHNOP_close 3
+struct evtchn_close {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <port>.
+ */
+#define EVTCHNOP_send 4
+struct evtchn_send {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <dom, port>.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may obtain the status of an event
+ * channel for which <dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_status 5
+struct evtchn_status {
+ /* IN parameters */
+ domid_t dom;
+ evtchn_port_t port;
+ /* OUT parameters */
+#define EVTCHNSTAT_closed 0 /* Channel is not in use. */
+#define EVTCHNSTAT_unbound 1 /* Channel is waiting interdom connection.*/
+#define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
+#define EVTCHNSTAT_pirq 3 /* Channel is bound to a phys IRQ line. */
+#define EVTCHNSTAT_virq 4 /* Channel is bound to a virtual IRQ line */
+#define EVTCHNSTAT_ipi 5 /* Channel is bound to a virtual IPI line */
+ uint32_t status;
+ uint32_t vcpu; /* VCPU to which this channel is bound. */
+ union {
+ struct {
+ domid_t dom;
+ } unbound; /* EVTCHNSTAT_unbound */
+ struct {
+ domid_t dom;
+ evtchn_port_t port;
+ } interdomain; /* EVTCHNSTAT_interdomain */
+ uint32_t pirq; /* EVTCHNSTAT_pirq */
+ uint32_t virq; /* EVTCHNSTAT_virq */
+ } u;
+};
+
+/*
+ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
+ * event is pending.
+ * NOTES:
+ * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised
+ * the binding. This binding cannot be changed.
+ * 2. All other channels notify vcpu0 by default. This default is set when
+ * the channel is allocated (a port that is freed and subsequently reused
+ * has its binding reset to vcpu0).
+ */
+#define EVTCHNOP_bind_vcpu 8
+struct evtchn_bind_vcpu {
+ /* IN parameters. */
+ evtchn_port_t port;
+ uint32_t vcpu;
+};
+
+/*
+ * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
+ * a notification to the appropriate VCPU if an event is pending.
+ */
+#define EVTCHNOP_unmask 9
+struct evtchn_unmask {
+ /* IN parameters. */
+ evtchn_port_t port;
+};
+
+struct evtchn_op {
+ uint32_t cmd; /* EVTCHNOP_* */
+ union {
+ struct evtchn_alloc_unbound alloc_unbound;
+ struct evtchn_bind_interdomain bind_interdomain;
+ struct evtchn_bind_virq bind_virq;
+ struct evtchn_bind_pirq bind_pirq;
+ struct evtchn_bind_ipi bind_ipi;
+ struct evtchn_close close;
+ struct evtchn_send send;
+ struct evtchn_status status;
+ struct evtchn_bind_vcpu bind_vcpu;
+ struct evtchn_unmask unmask;
+ } u;
+};
+DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
+
+#endif /* __XEN_PUBLIC_EVENT_CHANNEL_H__ */
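Editorial note, not part of the patch: a hedged sketch of allocating an unbound port, mirroring the EVTCHNOP_send usage in xen/events.h above. DOMID_SELF comes from the Xen public headers; the remote domain id and function name are illustrative.

/* Sketch only. */
static int example_alloc_unbound(domid_t remote_dom, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound op = {
		.dom        = DOMID_SELF,
		.remote_dom = remote_dom,
	};
	int err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);

	if (err == 0)
		*port = op.port;
	return err;
}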
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
new file mode 100644
index 00000000000..d73228d1648
--- /dev/null
+++ b/include/xen/interface/features.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ * features.h
+ *
+ * Feature flags, reported by XENVER_get_features.
+ *
+ * Copyright (c) 2006, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_FEATURES_H__
+#define __XEN_PUBLIC_FEATURES_H__
+
+/*
+ * If set, the guest does not need to write-protect its pagetables, and can
+ * update them via direct writes.
+ */
+#define XENFEAT_writable_page_tables 0
+
+/*
+ * If set, the guest does not need to write-protect its segment descriptor
+ * tables, and can update them via direct writes.
+ */
+#define XENFEAT_writable_descriptor_tables 1
+
+/*
+ * If set, translation between the guest's 'pseudo-physical' address space
+ * and the host's machine address space are handled by the hypervisor. In this
+ * mode the guest does not need to perform phys-to/from-machine translations
+ * when performing page table operations.
+ */
+#define XENFEAT_auto_translated_physmap 2
+
+/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
+#define XENFEAT_supervisor_mode_kernel 3
+
+/*
+ * If set, the guest does not need to allocate x86 PAE page directories
+ * below 4GB. This flag is usually implied by auto_translated_physmap.
+ */
+#define XENFEAT_pae_pgdir_above_4gb 4
+
+#define XENFEAT_NR_SUBMAPS 1
+
+#endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
new file mode 100644
index 00000000000..219049802cf
--- /dev/null
+++ b/include/xen/interface/grant_table.h
@@ -0,0 +1,375 @@
+/******************************************************************************
+ * grant_table.h
+ *
+ * Interface for granting foreign access to page frames, and receiving
+ * page-ownership transfers.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_GRANT_TABLE_H__
+#define __XEN_PUBLIC_GRANT_TABLE_H__
+
+
+/***********************************
+ * GRANT TABLE REPRESENTATION
+ */
+
+/* Some rough guidelines on accessing and updating grant-table entries
+ * in a concurrency-safe manner. For more information, Linux contains a
+ * reference implementation for guest OSes (arch/xen/kernel/grant_table.c).
+ *
+ * NB. WMB is a no-op on current-generation x86 processors. However, a
+ * compiler barrier will still be required.
+ *
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ *
+ * Invalidating an unused GTF_permit_access entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & (GTF_reading|GTF_writing)).
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ *
+ * Invalidating an in-use GTF_permit_access entry:
+ * This cannot be done directly. Request assistance from the domain controller
+ * which can set a timeout on the use of a grant entry and take necessary
+ * action. (NB. This is not yet implemented!).
+ *
+ * Invalidating an unused GTF_accept_transfer entry:
+ * 1. flags = ent->flags.
+ * 2. Observe that !(flags & GTF_transfer_committed). [*]
+ * 3. Check result of SMP-safe CMPXCHG(&ent->flags, flags, 0).
+ * NB. No need for WMB as reuse of entry is control-dependent on success of
+ * step 3, and all architectures guarantee ordering of ctrl-dep writes.
+ * [*] If GTF_transfer_committed is set then the grant entry is 'committed'.
+ * The guest must /not/ modify the grant entry until the address of the
+ * transferred frame is written. It is safe for the guest to spin waiting
+ * for this to occur (detect by observing GTF_transfer_completed in
+ * ent->flags).
+ *
+ * Invalidating a committed GTF_accept_transfer entry:
+ * 1. Wait for (ent->flags & GTF_transfer_completed).
+ *
+ * Changing a GTF_permit_access from writable to read-only:
+ * Use SMP-safe CMPXCHG to set GTF_readonly, while checking !GTF_writing.
+ *
+ * Changing a GTF_permit_access from read-only to writable:
+ * Use SMP-safe bit-setting instruction.
+ */
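/*
 * Hedged illustration (editorial, not part of the interface): a minimal
 * sketch of the "introducing a valid entry" sequence described above,
 * assuming the kernel's wmb() barrier and the GTF_* flags and grant_entry
 * layout defined later in this header.
 */
static inline void sketch_introduce_grant(struct grant_entry *ent,
					  domid_t domid, uint32_t frame,
					  int readonly)
{
	ent->domid = domid;				/* step 1 */
	ent->frame = frame;				/* step 2 */
	wmb();						/* step 3: write barrier */
	ent->flags = GTF_permit_access |		/* step 4: flags last */
		     (readonly ? GTF_readonly : 0);
}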
+
+/*
+ * A grant table comprises a packed array of grant entries in one or more
+ * page frames shared between Xen and a guest.
+ * [XEN]: This field is written by Xen and read by the sharing guest.
+ * [GST]: This field is written by the guest and read by Xen.
+ */
+struct grant_entry {
+ /* GTF_xxx: various type and flag information. [XEN,GST] */
+ uint16_t flags;
+ /* The domain being granted foreign privileges. [GST] */
+ domid_t domid;
+ /*
+ * GTF_permit_access: Frame that @domid is allowed to map and access. [GST]
+ * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN]
+ */
+ uint32_t frame;
+};
+
+/*
+ * Type of grant entry.
+ * GTF_invalid: This grant entry grants no privileges.
+ * GTF_permit_access: Allow @domid to map/access @frame.
+ * GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
+ * to this guest. Xen writes the page number to @frame.
+ */
+#define GTF_invalid (0U<<0)
+#define GTF_permit_access (1U<<0)
+#define GTF_accept_transfer (2U<<0)
+#define GTF_type_mask (3U<<0)
+
+/*
+ * Subflags for GTF_permit_access.
+ * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
+ * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
+ * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ */
+#define _GTF_readonly (2)
+#define GTF_readonly (1U<<_GTF_readonly)
+#define _GTF_reading (3)
+#define GTF_reading (1U<<_GTF_reading)
+#define _GTF_writing (4)
+#define GTF_writing (1U<<_GTF_writing)
+
+/*
+ * Subflags for GTF_accept_transfer:
+ * GTF_transfer_committed: Xen sets this flag to indicate that it is committed
+ * to transferring ownership of a page frame. When a guest sees this flag
+ * it must /not/ modify the grant entry until GTF_transfer_completed is
+ * set by Xen.
+ * GTF_transfer_completed: It is safe for the guest to spin-wait on this flag
+ * after reading GTF_transfer_committed. Xen will always write the frame
+ * address, followed by ORing this flag, in a timely manner.
+ */
+#define _GTF_transfer_committed (2)
+#define GTF_transfer_committed (1U<<_GTF_transfer_committed)
+#define _GTF_transfer_completed (3)
+#define GTF_transfer_completed (1U<<_GTF_transfer_completed)
+
+
+/***********************************
+ * GRANT TABLE QUERIES AND USES
+ */
+
+/*
+ * Reference to a grant entry in a specified domain's grant table.
+ */
+typedef uint32_t grant_ref_t;
+
+/*
+ * Handle to track a mapping created via a grant reference.
+ */
+typedef uint32_t grant_handle_t;
+
+/*
+ * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access
+ * by devices and/or host CPUs. If successful, <handle> is a tracking number
+ * that must be presented later to destroy the mapping(s). On error, <handle>
+ * is a negative status code.
+ * NOTES:
+ * 1. If GNTMAP_device_map is specified then <dev_bus_addr> is the address
+ * via which I/O devices may access the granted frame.
+ * 2. If GNTMAP_host_map is specified then a mapping will be added at
+ * either a host virtual address in the current address space, or at
+ * a PTE at the specified machine address. The type of mapping to
+ * perform is selected through the GNTMAP_contains_pte flag, and the
+ * address is specified in <host_addr>.
+ * 3. Mappings should only be destroyed via GNTTABOP_unmap_grant_ref. If a
+ * host mapping is destroyed by other means then it is *NOT* guaranteed
+ * to be accounted to the correct grant reference!
+ */
+#define GNTTABOP_map_grant_ref 0
+struct gnttab_map_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint32_t flags; /* GNTMAP_* */
+ grant_ref_t ref;
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+ grant_handle_t handle;
+ uint64_t dev_bus_addr;
+};
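
For orientation, a hedged sketch of how a backend might use this operation to map a frontend grant for CPU access. The HYPERVISOR_grant_table_op() wrapper is assumed to be provided elsewhere in this series with the usual (cmd, uop, count) signature; the example_* name is illustrative.

/* Illustrative only: map grant <ref> from <otherend> at virtual address <vaddr>. */
static int example_map_grant(domid_t otherend, grant_ref_t ref,
                             unsigned long vaddr, grant_handle_t *handle)
{
        struct gnttab_map_grant_ref op = {
                .host_addr = vaddr,             /* where it appears in our space */
                .flags     = GNTMAP_host_map,   /* CPU-accessible mapping */
                .ref       = ref,               /* grant published by the peer */
                .dom       = otherend,          /* peer domain id */
        };

        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
                return -EFAULT;                 /* hypercall itself failed */
        if (op.status != GNTST_okay)
                return -EINVAL;                 /* grant refused; see GNTST_* */

        *handle = op.handle;                    /* needed later to unmap */
        return 0;
}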
+
+/*
+ * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
+ * tracked by <handle>. If <host_addr> or <dev_bus_addr> is zero, that
+ * field is ignored. If either is non-zero, it must refer to a device/host
+ * mapping that is tracked by <handle>.
+ * NOTES:
+ * 1. The call may fail in an undefined manner if either mapping is not
+ * tracked by <handle>.
+ * 2. After executing a batch of unmaps, it is guaranteed that no stale
+ * mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_grant_ref 1
+struct gnttab_unmap_grant_ref {
+ /* IN parameters. */
+ uint64_t host_addr;
+ uint64_t dev_bus_addr;
+ grant_handle_t handle;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+};
+
+/*
+ * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
+ * <nr_frames> pages. The frame addresses are written to the <frame_list>.
+ * Only <nr_frames> addresses are written, even if the table is larger.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ * 3. Xen may not support more than a single grant-table page per domain.
+ */
+#define GNTTABOP_setup_table 2
+struct gnttab_setup_table {
+ /* IN parameters. */
+ domid_t dom;
+ uint32_t nr_frames;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+ ulong *frame_list;
+};
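
A sketch of the corresponding guest-side call, again assuming the HYPERVISOR_grant_table_op() wrapper; the caller supplies an array that Xen fills with the machine frame numbers backing the table. The example_* name is illustrative.

/* Illustrative only: ask Xen for <nr> frames of our own grant table. */
static int example_setup_table(unsigned long *frames, uint32_t nr)
{
        struct gnttab_setup_table setup = {
                .dom        = DOMID_SELF,       /* our own grant table */
                .nr_frames  = nr,
                .frame_list = frames,           /* filled in by Xen */
        };

        if (HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1))
                return -EFAULT;
        return setup.status == GNTST_okay ? 0 : -ENOSYS;
}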
+
+/*
+ * GNTTABOP_dump_table: Dump the contents of the grant table to the
+ * xen console. Debugging use only.
+ */
+#define GNTTABOP_dump_table 3
+struct gnttab_dump_table {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ int16_t status; /* GNTST_* */
+};
+
+/*
+ * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The
+ * foreign domain has previously registered its interest in the transfer via
+ * <domid, ref>.
+ *
+ * Note that, even if the transfer fails, the specified page no longer belongs
+ * to the calling domain *unless* the error is GNTST_bad_page.
+ */
+#define GNTTABOP_transfer 4
+struct gnttab_transfer {
+ /* IN parameters. */
+ unsigned long mfn;
+ domid_t domid;
+ grant_ref_t ref;
+ /* OUT parameters. */
+ int16_t status;
+};
+
+
+/*
+ * GNTTABOP_copy: Hypervisor-based copy. The source and destination can each
+ * be either an MFN or, for foreign domains, a grant reference; the foreign
+ * domain has to grant read/write access in its grant table.
+ *
+ * The flags specify what type source and destinations are (either MFN
+ * or grant reference).
+ *
+ * Note that this can also be used to copy data between two domains
+ * via a third party if the source and destination domains have previously
+ * granted appropriate access to their pages to the third party.
+ *
+ * source_offset specifies an offset in the source frame, dest_offset
+ * the offset in the target frame and len specifies the number of
+ * bytes to be copied.
+ */
+
+#define _GNTCOPY_source_gref (0)
+#define GNTCOPY_source_gref (1<<_GNTCOPY_source_gref)
+#define _GNTCOPY_dest_gref (1)
+#define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref)
+
+#define GNTTABOP_copy 5
+struct gnttab_copy {
+ /* IN parameters. */
+ struct {
+ union {
+ grant_ref_t ref;
+ unsigned long gmfn;
+ } u;
+ domid_t domid;
+ uint16_t offset;
+ } source, dest;
+ uint16_t len;
+ uint16_t flags; /* GNTCOPY_* */
+ /* OUT parameters. */
+ int16_t status;
+};
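
A sketch of a grant copy that pulls data out of a foreign grant reference into a local frame; only the source is a grant reference here, so only GNTCOPY_source_gref is set in flags. The hypercall wrapper and example_* name are assumptions as above.

/* Illustrative only: copy <len> bytes from (<src_dom>,<src_ref>) into <dst_gmfn>. */
static int example_grant_copy(domid_t src_dom, grant_ref_t src_ref,
                              unsigned long dst_gmfn, uint16_t len)
{
        struct gnttab_copy copy = {
                .source.u.ref  = src_ref,       /* foreign grant reference */
                .source.domid  = src_dom,
                .source.offset = 0,
                .dest.u.gmfn   = dst_gmfn,      /* one of our own frames */
                .dest.domid    = DOMID_SELF,
                .dest.offset   = 0,
                .len           = len,           /* must stay within one page */
                .flags         = GNTCOPY_source_gref,
        };

        if (HYPERVISOR_grant_table_op(GNTTABOP_copy, &copy, 1))
                return -EFAULT;
        return copy.status == GNTST_okay ? 0 : -EIO;
}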
+
+/*
+ * GNTTABOP_query_size: Query the current and maximum sizes of the shared
+ * grant table.
+ * NOTES:
+ * 1. <dom> may be specified as DOMID_SELF.
+ * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
+ */
+#define GNTTABOP_query_size 6
+struct gnttab_query_size {
+ /* IN parameters. */
+ domid_t dom;
+ /* OUT parameters. */
+ uint32_t nr_frames;
+ uint32_t max_nr_frames;
+ int16_t status; /* GNTST_* */
+};
+
+
+/*
+ * Bitfield values for gnttab_map_grant_ref.flags (GNTMAP_*).
+ */
+ /* Map the grant entry for access by I/O devices. */
+#define _GNTMAP_device_map (0)
+#define GNTMAP_device_map (1<<_GNTMAP_device_map)
+ /* Map the grant entry for access by host CPUs. */
+#define _GNTMAP_host_map (1)
+#define GNTMAP_host_map (1<<_GNTMAP_host_map)
+ /* Accesses to the granted frame will be restricted to read-only access. */
+#define _GNTMAP_readonly (2)
+#define GNTMAP_readonly (1<<_GNTMAP_readonly)
+ /*
+ * GNTMAP_host_map subflag:
+ * 0 => The host mapping is usable only by the guest OS.
+ * 1 => The host mapping is usable by guest OS + current application.
+ */
+#define _GNTMAP_application_map (3)
+#define GNTMAP_application_map (1<<_GNTMAP_application_map)
+
+ /*
+ * GNTMAP_contains_pte subflag:
+ * 0 => This map request contains a host virtual address.
+ * 1 => This map request contains the machine address of the PTE to update.
+ */
+#define _GNTMAP_contains_pte (4)
+#define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
+
+/*
+ * Values for error status returns. All errors are -ve.
+ */
+#define GNTST_okay (0) /* Normal return. */
+#define GNTST_general_error (-1) /* General undefined error. */
+#define GNTST_bad_domain (-2) /* Unrecognised domain id. */
+#define GNTST_bad_gntref (-3) /* Unrecognised or inappropriate gntref. */
+#define GNTST_bad_handle (-4) /* Unrecognised or inappropriate handle. */
+#define GNTST_bad_virt_addr (-5) /* Inappropriate virtual address to map. */
+#define GNTST_bad_dev_addr (-6) /* Inappropriate device address to unmap.*/
+#define GNTST_no_device_space (-7) /* Out of space in I/O MMU. */
+#define GNTST_permission_denied (-8) /* Not enough privilege for operation. */
+#define GNTST_bad_page (-9) /* Specified page was invalid for op. */
+#define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary */
+
+#define GNTTABOP_error_msgs { \
+ "okay", \
+ "undefined error", \
+ "unrecognised domain id", \
+ "invalid grant reference", \
+ "invalid mapping handle", \
+ "invalid virtual address", \
+ "invalid device address", \
+ "no spare translation slot in the I/O MMU", \
+ "permission denied", \
+ "bad page", \
+ "copy arguments cross page boundary" \
+}
+
+#endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
new file mode 100644
index 00000000000..c2d1fa4dc1e
--- /dev/null
+++ b/include/xen/interface/io/blkif.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ * blkif.h
+ *
+ * Unified block-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_BLKIF_H__
+#define __XEN_PUBLIC_IO_BLKIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Front->back notifications: When enqueuing a new request, sending a
+ * notification can be made conditional on req_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: When enqueuing a new response, sending a
+ * notification can be made conditional on rsp_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ */
+
+typedef uint16_t blkif_vdev_t;
+typedef uint64_t blkif_sector_t;
+
+/*
+ * REQUEST CODES.
+ */
+#define BLKIF_OP_READ 0
+#define BLKIF_OP_WRITE 1
+/*
+ * Recognised only if "feature-barrier" is present in backend xenbus info.
+ * The "feature_barrier" node contains a boolean indicating whether barrier
+ * requests are likely to succeed or fail. Either way, a barrier request
+ * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
+ * the underlying block-device hardware. The boolean simply indicates whether
+ * or not it is worthwhile for the frontend to attempt barrier requests.
+ * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
+ * create the "feature-barrier" node!
+ */
+#define BLKIF_OP_WRITE_BARRIER 2
+
+/*
+ * Maximum scatter/gather segments per request.
+ * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
+ * NB. This could be 12 if the ring indexes weren't stored in the same page.
+ */
+#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
+
+struct blkif_request {
+ uint8_t operation; /* BLKIF_OP_??? */
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
+ struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
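
To make the segment fields concrete, a sketch of a frontend building a one-segment read request; the ring-slot and grant plumbing is assumed to happen elsewhere, and the example_* name is illustrative.

/* Illustrative only: read 8 sectors starting at <sector> into one granted frame. */
static void example_fill_blkif_read(struct blkif_request *req, uint64_t id,
                                    blkif_vdev_t handle, grant_ref_t gref,
                                    blkif_sector_t sector)
{
        req->operation     = BLKIF_OP_READ;
        req->nr_segments   = 1;
        req->handle        = handle;
        req->id            = id;        /* echoed back in the response */
        req->sector_number = sector;

        req->seg[0].gref       = gref;  /* granted I/O buffer frame */
        req->seg[0].first_sect = 0;     /* transfer sectors 0..7 inclusive */
        req->seg[0].last_sect  = 7;
}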
+
+struct blkif_response {
+ uint64_t id; /* copied from request */
+ uint8_t operation; /* copied from request */
+ int16_t status; /* BLKIF_RSP_??? */
+};
+
+/*
+ * STATUS RETURN CODES.
+ */
+ /* Operation not supported (only happens on barrier writes). */
+#define BLKIF_RSP_EOPNOTSUPP -2
+ /* Operation failed for some unspecified reason (-EIO). */
+#define BLKIF_RSP_ERROR -1
+ /* Operation completed successfully. */
+#define BLKIF_RSP_OKAY 0
+
+/*
+ * Generate blkif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
+
+#define VDISK_CDROM 0x1
+#define VDISK_REMOVABLE 0x2
+#define VDISK_READONLY 0x4
+
+#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */
diff --git a/include/xen/interface/io/console.h b/include/xen/interface/io/console.h
new file mode 100644
index 00000000000..e563de70f78
--- /dev/null
+++ b/include/xen/interface/io/console.h
@@ -0,0 +1,23 @@
+/******************************************************************************
+ * console.h
+ *
+ * Console I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2005, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_CONSOLE_H__
+#define __XEN_PUBLIC_IO_CONSOLE_H__
+
+typedef uint32_t XENCONS_RING_IDX;
+
+#define MASK_XENCONS_IDX(idx, ring) ((idx) & (sizeof(ring)-1))
+
+struct xencons_interface {
+ char in[1024];
+ char out[2048];
+ XENCONS_RING_IDX in_cons, in_prod;
+ XENCONS_RING_IDX out_cons, out_prod;
+};
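
The console ring is a plain byte ring indexed modulo the buffer size; the following sketch shows the producer side for guest output. This mirrors what a console frontend would do, but the example_* name and the exact barrier placement are an illustration, not a quote from the driver.

/* Illustrative only: copy bytes into the output ring; caller notifies the peer. */
static int example_xencons_write(struct xencons_interface *intf,
                                 const char *data, int len)
{
        XENCONS_RING_IDX cons = intf->out_cons;
        XENCONS_RING_IDX prod = intf->out_prod;
        int sent = 0;

        mb();                   /* read both indexes before writing data */

        while (sent < len && (prod - cons) < sizeof(intf->out)) {
                intf->out[MASK_XENCONS_IDX(prod, intf->out)] = data[sent];
                prod++;
                sent++;
        }

        wmb();                  /* data visible before the new producer index */
        intf->out_prod = prod;
        return sent;
}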
+
+#endif /* __XEN_PUBLIC_IO_CONSOLE_H__ */
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
new file mode 100644
index 00000000000..518481c95f1
--- /dev/null
+++ b/include/xen/interface/io/netif.h
@@ -0,0 +1,158 @@
+/******************************************************************************
+ * netif.h
+ *
+ * Unified network-device I/O interface for Xen guest OSes.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser
+ */
+
+#ifndef __XEN_PUBLIC_IO_NETIF_H__
+#define __XEN_PUBLIC_IO_NETIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
+ * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
+ * that it cannot safely queue packets (as it may not be kicked to send them).
+ */
+
+/*
+ * This is the 'wire' format for packets:
+ * Request 1: netif_tx_request -- NETTXF_* (any flags)
+ * [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info)
+ * [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE)
+ * Request 4: netif_tx_request -- NETTXF_more_data
+ * Request 5: netif_tx_request -- NETTXF_more_data
+ * ...
+ * Request N: netif_tx_request -- 0
+ */
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETTXF_csum_blank (0)
+#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank)
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETTXF_data_validated (1)
+#define NETTXF_data_validated (1U<<_NETTXF_data_validated)
+
+/* Packet continues in the next request descriptor. */
+#define _NETTXF_more_data (2)
+#define NETTXF_more_data (1U<<_NETTXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETTXF_extra_info (3)
+#define NETTXF_extra_info (1U<<_NETTXF_extra_info)
+
+struct xen_netif_tx_request {
+ grant_ref_t gref; /* Reference to buffer page */
+ uint16_t offset; /* Offset within buffer page */
+ uint16_t flags; /* NETTXF_* */
+ uint16_t id; /* Echoed in response message. */
+ uint16_t size; /* Packet size in bytes. */
+};
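
As a small worked example of the flag bits above, a sketch of filling one TX slot for a packet that fits in a single page, with checksum offload requested; the example_* name is illustrative.

/* Illustrative only: single-slot TX request, checksum left to the backend. */
static void example_fill_netif_tx(struct xen_netif_tx_request *tx,
                                  grant_ref_t gref, uint16_t offset,
                                  uint16_t size, uint16_t id)
{
        tx->gref   = gref;                      /* grant covering the packet page */
        tx->offset = offset;                    /* start of data within that page */
        tx->flags  = NETTXF_csum_blank |        /* checksum not yet computed... */
                     NETTXF_data_validated;     /* ...but data already validated */
        tx->id     = id;                        /* echoed in the TX response */
        tx->size   = size;                      /* total packet size in bytes */
}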
+
+/* Types of netif_extra_info descriptors. */
+#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
+#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
+#define XEN_NETIF_EXTRA_TYPE_MAX (2)
+
+/* netif_extra_info flags. */
+#define _XEN_NETIF_EXTRA_FLAG_MORE (0)
+#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
+
+/* GSO types - only TCPv4 currently supported. */
+#define XEN_NETIF_GSO_TYPE_TCPV4 (1)
+
+/*
+ * This structure needs to fit within both netif_tx_request and
+ * netif_rx_response for compatibility.
+ */
+struct xen_netif_extra_info {
+ uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */
+ uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */
+
+ union {
+ struct {
+ /*
+ * Maximum payload size of each segment. For
+ * example, for TCP this is just the path MSS.
+ */
+ uint16_t size;
+
+ /*
+ * GSO type. This determines the protocol of
+ * the packet and any extra features required
+ * to segment the packet properly.
+ */
+ uint8_t type; /* XEN_NETIF_GSO_TYPE_* */
+
+ /* Future expansion. */
+ uint8_t pad;
+
+ /*
+ * GSO features. This specifies any extra GSO
+ * features required to process this packet,
+ * such as ECN support for TCPv4.
+ */
+ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
+ } gso;
+
+ uint16_t pad[3];
+ } u;
+};
+
+struct xen_netif_tx_response {
+ uint16_t id;
+ int16_t status; /* NETIF_RSP_* */
+};
+
+struct xen_netif_rx_request {
+ uint16_t id; /* Echoed in response message. */
+ grant_ref_t gref; /* Reference to incoming granted frame */
+};
+
+/* Packet data has been validated against protocol checksum. */
+#define _NETRXF_data_validated (0)
+#define NETRXF_data_validated (1U<<_NETRXF_data_validated)
+
+/* Protocol checksum field is blank in the packet (hardware offload)? */
+#define _NETRXF_csum_blank (1)
+#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank)
+
+/* Packet continues in the next request descriptor. */
+#define _NETRXF_more_data (2)
+#define NETRXF_more_data (1U<<_NETRXF_more_data)
+
+/* Packet to be followed by extra descriptor(s). */
+#define _NETRXF_extra_info (3)
+#define NETRXF_extra_info (1U<<_NETRXF_extra_info)
+
+struct xen_netif_rx_response {
+ uint16_t id;
+ uint16_t offset; /* Offset in page of start of received packet */
+ uint16_t flags; /* NETRXF_* */
+ int16_t status; /* -ve: NETIF_RSP_* ; +ve: Rx'ed pkt size. */
+};
+
+/*
+ * Generate netif ring structures and types.
+ */
+
+DEFINE_RING_TYPES(xen_netif_tx,
+ struct xen_netif_tx_request,
+ struct xen_netif_tx_response);
+DEFINE_RING_TYPES(xen_netif_rx,
+ struct xen_netif_rx_request,
+ struct xen_netif_rx_response);
+
+#define NETIF_RSP_DROPPED -2
+#define NETIF_RSP_ERROR -1
+#define NETIF_RSP_OKAY 0
+/* No response: used for auxiliary requests (e.g., netif_tx_extra). */
+#define NETIF_RSP_NULL 1
+
+#endif
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
new file mode 100644
index 00000000000..e8cbf431c8c
--- /dev/null
+++ b/include/xen/interface/io/ring.h
@@ -0,0 +1,260 @@
+/******************************************************************************
+ * ring.h
+ *
+ * Shared producer-consumer ring macros.
+ *
+ * Tim Deegan and Andrew Warfield November 2004.
+ */
+
+#ifndef __XEN_PUBLIC_IO_RING_H__
+#define __XEN_PUBLIC_IO_RING_H__
+
+typedef unsigned int RING_IDX;
+
+/* Round a 32-bit unsigned constant down to the nearest power of two. */
+#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
+#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
+#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
+#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
+#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
+
+/*
+ * Calculate size of a shared ring, given the total available space for the
+ * ring and indexes (_sz), and the name tag of the request/response structure.
+ * A ring contains as many entries as will fit, rounded down to the nearest
+ * power of two (so we can mask with (size-1) to loop around).
+ */
+#define __RING_SIZE(_s, _sz) \
+ (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
+
+/*
+ * Macros to make the correct C datatypes for a new kind of ring.
+ *
+ * To make a new ring datatype, you need to have two message structures,
+ * let's say struct request, and struct response already defined.
+ *
+ * In a header where you want the ring datatype declared, you then do:
+ *
+ * DEFINE_RING_TYPES(mytag, struct request, struct response);
+ *
+ * These expand out to give you a set of types, as you can see below.
+ * The most important of these are:
+ *
+ * struct mytag_sring - The shared ring.
+ * struct mytag_front_ring - The 'front' half of the ring.
+ * struct mytag_back_ring - The 'back' half of the ring.
+ *
+ * To initialize a ring in your code you need to know the location and size
+ * of the shared memory area (PAGE_SIZE, for instance). To initialise
+ * the front half:
+ *
+ * struct mytag_front_ring front_ring;
+ * SHARED_RING_INIT((struct mytag_sring *)shared_page);
+ * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
+ * PAGE_SIZE);
+ *
+ * Initializing the back follows similarly (note that only the front
+ * initializes the shared ring):
+ *
+ * struct mytag_back_ring back_ring;
+ * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
+ * PAGE_SIZE);
+ */
+
+#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
+ \
+/* Shared ring entry */ \
+union __name##_sring_entry { \
+ __req_t req; \
+ __rsp_t rsp; \
+}; \
+ \
+/* Shared ring page */ \
+struct __name##_sring { \
+ RING_IDX req_prod, req_event; \
+ RING_IDX rsp_prod, rsp_event; \
+ uint8_t pad[48]; \
+ union __name##_sring_entry ring[1]; /* variable-length */ \
+}; \
+ \
+/* "Front" end's private variables */ \
+struct __name##_front_ring { \
+ RING_IDX req_prod_pvt; \
+ RING_IDX rsp_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+}; \
+ \
+/* "Back" end's private variables */ \
+struct __name##_back_ring { \
+ RING_IDX rsp_prod_pvt; \
+ RING_IDX req_cons; \
+ unsigned int nr_ents; \
+ struct __name##_sring *sring; \
+};
+
+/*
+ * Macros for manipulating rings.
+ *
+ * FRONT_RING_whatever works on the "front end" of a ring: here
+ * requests are pushed on to the ring and responses taken off it.
+ *
+ * BACK_RING_whatever works on the "back end" of a ring: here
+ * requests are taken off the ring and responses put on.
+ *
+ * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
+ * This is OK in 1-for-1 request-response situations where the
+ * requestor (front end) never has more than RING_SIZE()-1
+ * outstanding requests.
+ */
+
+/* Initialising empty rings */
+#define SHARED_RING_INIT(_s) do { \
+ (_s)->req_prod = (_s)->rsp_prod = 0; \
+ (_s)->req_event = (_s)->rsp_event = 1; \
+ memset((_s)->pad, 0, sizeof((_s)->pad)); \
+} while(0)
+
+#define FRONT_RING_INIT(_r, _s, __size) do { \
+ (_r)->req_prod_pvt = 0; \
+ (_r)->rsp_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
+} while (0)
+
+#define BACK_RING_INIT(_r, _s, __size) do { \
+ (_r)->rsp_prod_pvt = 0; \
+ (_r)->req_cons = 0; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+ (_r)->sring = (_s); \
+} while (0)
+
+/* Initialize to existing shared indexes -- for recovery */
+#define FRONT_RING_ATTACH(_r, _s, __size) do { \
+ (_r)->sring = (_s); \
+ (_r)->req_prod_pvt = (_s)->req_prod; \
+ (_r)->rsp_cons = (_s)->rsp_prod; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+} while (0)
+
+#define BACK_RING_ATTACH(_r, _s, __size) do { \
+ (_r)->sring = (_s); \
+ (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
+ (_r)->req_cons = (_s)->req_prod; \
+ (_r)->nr_ents = __RING_SIZE(_s, __size); \
+} while (0)
+
+/* How big is this ring? */
+#define RING_SIZE(_r) \
+ ((_r)->nr_ents)
+
+/* Number of free requests (for use on front side only). */
+#define RING_FREE_REQUESTS(_r) \
+ (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
+
+/* Test if there is an empty slot available on the front ring.
+ * (This is only meaningful from the front.)
+ */
+#define RING_FULL(_r) \
+ (RING_FREE_REQUESTS(_r) == 0)
+
+/* Test if there are outstanding messages to be processed on a ring. */
+#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
+ ((_r)->sring->rsp_prod - (_r)->rsp_cons)
+
+#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
+ ({ \
+ unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
+ unsigned int rsp = RING_SIZE(_r) - \
+ ((_r)->req_cons - (_r)->rsp_prod_pvt); \
+ req < rsp ? req : rsp; \
+ })
+
+/* Direct access to individual ring elements, by index. */
+#define RING_GET_REQUEST(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
+
+#define RING_GET_RESPONSE(_r, _idx) \
+ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+
+/* Loop termination condition: Would the specified index overflow the ring? */
+#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
+#define RING_PUSH_REQUESTS(_r) do { \
+ wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+} while (0)
+
+#define RING_PUSH_RESPONSES(_r) do { \
+ wmb(); /* front sees responses /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
+} while (0)
+
+/*
+ * Notification hold-off (req_event and rsp_event):
+ *
+ * When queueing requests or responses on a shared ring, it may not always be
+ * necessary to notify the remote end. For example, if requests are in flight
+ * in a backend, the front may be able to queue further requests without
+ * notifying the back (if the back checks for new requests when it queues
+ * responses).
+ *
+ * When enqueuing requests or responses:
+ *
+ * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
+ * is a boolean return value. True indicates that the receiver requires an
+ * asynchronous notification.
+ *
+ * After dequeuing requests or responses (before sleeping the connection):
+ *
+ * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
+ * The second argument is a boolean return value. True indicates that there
+ * are pending messages on the ring (i.e., the connection should not be put
+ * to sleep).
+ *
+ * These macros will set the req_event/rsp_event field to trigger a
+ * notification on the very next message that is enqueued. If you want to
+ * create batches of work (i.e., only receive a notification after several
+ * messages have been enqueued) then you will need to create a customised
+ * version of the FINAL_CHECK macro in your own code, which sets the event
+ * field appropriately.
+ */
+
+#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->req_prod; \
+ RING_IDX __new = (_r)->req_prod_pvt; \
+ wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = __new; \
+ mb(); /* back sees new requests /before/ we check req_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
+ (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
+ RING_IDX __old = (_r)->sring->rsp_prod; \
+ RING_IDX __new = (_r)->rsp_prod_pvt; \
+ wmb(); /* front sees responses /before/ updated producer index */ \
+ (_r)->sring->rsp_prod = __new; \
+ mb(); /* front sees new responses /before/ we check rsp_event */ \
+ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
+ (RING_IDX)(__new - __old)); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->req_event = (_r)->req_cons + 1; \
+ mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
+} while (0)
+
+#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+ if (_work_to_do) break; \
+ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
+ mb(); \
+ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
+} while (0)
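
Tying the pieces together, a sketch of a backend's request-consumption loop using the hold-off machinery above. The 'mytag'/'struct request' names follow the illustrative DEFINE_RING_TYPES() example earlier in this header; the example_* name and the elided request handling are placeholders.

/* Illustrative only: drain requests, then re-arm req_event before idling. */
static void example_consume_requests(struct mytag_back_ring *ring)
{
        int more_to_do;

        do {
                RING_IDX prod = ring->sring->req_prod;

                rmb();  /* see queued requests before trusting the producer index */

                while (ring->req_cons != prod) {
                        struct request *req =
                                RING_GET_REQUEST(ring, ring->req_cons);

                        /* ... handle *req and queue a response ... */
                        ring->req_cons++;
                }

                /* Re-arms req_event so the front notifies us next time. */
                RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
        } while (more_to_do);
}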
+
+#endif /* __XEN_PUBLIC_IO_RING_H__ */
diff --git a/include/xen/interface/io/xenbus.h b/include/xen/interface/io/xenbus.h
new file mode 100644
index 00000000000..46508c7fa39
--- /dev/null
+++ b/include/xen/interface/io/xenbus.h
@@ -0,0 +1,44 @@
+/*****************************************************************************
+ * xenbus.h
+ *
+ * Xenbus protocol details.
+ *
+ * Copyright (C) 2005 XenSource Ltd.
+ */
+
+#ifndef _XEN_PUBLIC_IO_XENBUS_H
+#define _XEN_PUBLIC_IO_XENBUS_H
+
+/* The state of either end of the Xenbus, i.e. the current communication
+ status of initialisation across the bus. States here imply nothing about
+ the state of the connection between the driver and the kernel's device
+ layers. */
+enum xenbus_state
+{
+ XenbusStateUnknown = 0,
+ XenbusStateInitialising = 1,
+ XenbusStateInitWait = 2, /* Finished early
+ initialisation, but waiting
+ for information from the peer
+ or hotplug scripts. */
+ XenbusStateInitialised = 3, /* Initialised and waiting for a
+ connection from the peer. */
+ XenbusStateConnected = 4,
+ XenbusStateClosing = 5, /* The device is being closed
+ due to an error or an unplug
+ event. */
+ XenbusStateClosed = 6
+
+};
+
+#endif /* _XEN_PUBLIC_IO_XENBUS_H */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
new file mode 100644
index 00000000000..99fcffb372d
--- /dev/null
+++ b/include/xen/interface/io/xs_wire.h
@@ -0,0 +1,87 @@
+/*
+ * Details of the "wire" protocol between Xen Store Daemon and client
+ * library or guest kernel.
+ * Copyright (C) 2005 Rusty Russell IBM Corporation
+ */
+
+#ifndef _XS_WIRE_H
+#define _XS_WIRE_H
+
+enum xsd_sockmsg_type
+{
+ XS_DEBUG,
+ XS_DIRECTORY,
+ XS_READ,
+ XS_GET_PERMS,
+ XS_WATCH,
+ XS_UNWATCH,
+ XS_TRANSACTION_START,
+ XS_TRANSACTION_END,
+ XS_INTRODUCE,
+ XS_RELEASE,
+ XS_GET_DOMAIN_PATH,
+ XS_WRITE,
+ XS_MKDIR,
+ XS_RM,
+ XS_SET_PERMS,
+ XS_WATCH_EVENT,
+ XS_ERROR,
+ XS_IS_DOMAIN_INTRODUCED
+};
+
+#define XS_WRITE_NONE "NONE"
+#define XS_WRITE_CREATE "CREATE"
+#define XS_WRITE_CREATE_EXCL "CREATE|EXCL"
+
+/* We hand errors as strings, for portability. */
+struct xsd_errors
+{
+ int errnum;
+ const char *errstring;
+};
+#define XSD_ERROR(x) { x, #x }
+static struct xsd_errors xsd_errors[] __attribute__((unused)) = {
+ XSD_ERROR(EINVAL),
+ XSD_ERROR(EACCES),
+ XSD_ERROR(EEXIST),
+ XSD_ERROR(EISDIR),
+ XSD_ERROR(ENOENT),
+ XSD_ERROR(ENOMEM),
+ XSD_ERROR(ENOSPC),
+ XSD_ERROR(EIO),
+ XSD_ERROR(ENOTEMPTY),
+ XSD_ERROR(ENOSYS),
+ XSD_ERROR(EROFS),
+ XSD_ERROR(EBUSY),
+ XSD_ERROR(EAGAIN),
+ XSD_ERROR(EISCONN)
+};
+
+struct xsd_sockmsg
+{
+ uint32_t type; /* XS_??? */
+ uint32_t req_id;/* Request identifier, echoed in daemon's response. */
+ uint32_t tx_id; /* Transaction id (0 if not related to a transaction). */
+ uint32_t len; /* Length of data following this. */
+
+ /* Generally followed by nul-terminated string(s). */
+};
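
A sketch of laying out one request on the wire: the fixed header immediately followed by its payload (here a nul-terminated path for XS_READ). Buffer management and the ring copy are assumed to happen elsewhere; the example_* name is illustrative.

/* Illustrative only: build an XS_READ message for <path> in <buf>. */
static int example_build_xs_read(void *buf, size_t buflen,
                                 uint32_t req_id, const char *path)
{
        struct xsd_sockmsg msg = {
                .type   = XS_READ,
                .req_id = req_id,               /* echoed in the reply */
                .tx_id  = 0,                    /* not part of a transaction */
                .len    = strlen(path) + 1,     /* nul-terminated payload */
        };

        if (sizeof(msg) + msg.len > buflen)
                return -ENOSPC;

        memcpy(buf, &msg, sizeof(msg));
        memcpy((char *)buf + sizeof(msg), path, msg.len);
        return sizeof(msg) + msg.len;
}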
+
+enum xs_watch_type
+{
+ XS_WATCH_PATH = 0,
+ XS_WATCH_TOKEN
+};
+
+/* Inter-domain shared memory communications. */
+#define XENSTORE_RING_SIZE 1024
+typedef uint32_t XENSTORE_RING_IDX;
+#define MASK_XENSTORE_IDX(idx) ((idx) & (XENSTORE_RING_SIZE-1))
+struct xenstore_domain_interface {
+ char req[XENSTORE_RING_SIZE]; /* Requests to xenstore daemon. */
+ char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */
+ XENSTORE_RING_IDX req_cons, req_prod;
+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
+};
+
+#endif /* _XS_WIRE_H */
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
new file mode 100644
index 00000000000..af36ead1681
--- /dev/null
+++ b/include/xen/interface/memory.h
@@ -0,0 +1,145 @@
+/******************************************************************************
+ * memory.h
+ *
+ * Memory reservation and information.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_MEMORY_H__
+#define __XEN_PUBLIC_MEMORY_H__
+
+/*
+ * Increase or decrease the specified domain's memory reservation. Returns a
+ * -ve errcode on failure, or the # extents successfully allocated or freed.
+ * arg == addr of struct xen_memory_reservation.
+ */
+#define XENMEM_increase_reservation 0
+#define XENMEM_decrease_reservation 1
+#define XENMEM_populate_physmap 6
+struct xen_memory_reservation {
+
+ /*
+ * XENMEM_increase_reservation:
+ * OUT: MFN (*not* GMFN) bases of extents that were allocated
+ * XENMEM_decrease_reservation:
+ * IN: GMFN bases of extents to free
+ * XENMEM_populate_physmap:
+ * IN: GPFN bases of extents to populate with memory
+ * OUT: GMFN bases of extents that were allocated
+ * (NB. This command also updates the mach_to_phys translation table)
+ */
+ GUEST_HANDLE(ulong) extent_start;
+
+ /* Number of extents, and size/alignment of each (2^extent_order pages). */
+ unsigned long nr_extents;
+ unsigned int extent_order;
+
+ /*
+ * Maximum # bits addressable by the user of the allocated region (e.g.,
+ * I/O devices often have a 32-bit limitation even in 64-bit systems). If
+ * zero then the user has no addressing restriction.
+ * This field is not used by XENMEM_decrease_reservation.
+ */
+ unsigned int address_bits;
+
+ /*
+ * Domain whose reservation is being changed.
+ * Unprivileged domains can specify only DOMID_SELF.
+ */
+ domid_t domid;
+
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
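
A balloon-style sketch of handing frames back to Xen with XENMEM_decrease_reservation. The HYPERVISOR_memory_op() wrapper and the set_xen_guest_handle() helper for filling GUEST_HANDLE fields are assumed to be provided elsewhere in this series; the example_* name is illustrative.

/* Illustrative only: release <nr> single frames listed in <gmfns> back to Xen. */
static long example_decrease_reservation(unsigned long *gmfns, unsigned long nr)
{
        struct xen_memory_reservation reservation = {
                .nr_extents   = nr,
                .extent_order = 0,              /* each extent is one page */
                .address_bits = 0,              /* no addressing restriction */
                .domid        = DOMID_SELF,
        };

        /* Assumed helper: points the GUEST_HANDLE(ulong) field at our array. */
        set_xen_guest_handle(reservation.extent_start, gmfns);

        /* Returns the number of extents actually freed, or a -ve errno. */
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}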
+
+/*
+ * Returns the maximum machine frame number of mapped RAM in this system.
+ * This command always succeeds (it never returns an error code).
+ * arg == NULL.
+ */
+#define XENMEM_maximum_ram_page 2
+
+/*
+ * Returns the current or maximum memory reservation, in pages, of the
+ * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
+ * arg == addr of domid_t.
+ */
+#define XENMEM_current_reservation 3
+#define XENMEM_maximum_reservation 4
+
+/*
+ * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
+ * mapping table. Architectures which do not have a m2p table do not implement
+ * this command.
+ * arg == addr of xen_machphys_mfn_list_t.
+ */
+#define XENMEM_machphys_mfn_list 5
+struct xen_machphys_mfn_list {
+ /*
+ * Size of the 'extent_start' array. Fewer entries will be filled if the
+ * machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int max_extents;
+
+ /*
+ * Pointer to buffer to fill with list of extent starts. If there are
+ * any large discontiguities in the machine address space, 2MB gaps in
+ * the machphys table will be represented by an MFN base of zero.
+ */
+ GUEST_HANDLE(ulong) extent_start;
+
+ /*
+ * Number of extents written to the above array. This will be smaller
+ * than 'max_extents' if the machphys table is smaller than max_extents * 2MB.
+ */
+ unsigned int nr_extents;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
+
+/*
+ * Sets the GPFN at which a particular page appears in the specified guest's
+ * pseudophysical address space.
+ * arg == addr of xen_add_to_physmap_t.
+ */
+#define XENMEM_add_to_physmap 7
+struct xen_add_to_physmap {
+ /* Which domain to change the mapping for. */
+ domid_t domid;
+
+ /* Source mapping space. */
+#define XENMAPSPACE_shared_info 0 /* shared info page */
+#define XENMAPSPACE_grant_table 1 /* grant table page */
+ unsigned int space;
+
+ /* Index into source mapping space. */
+ unsigned long idx;
+
+ /* GPFN where the source mapping page should appear. */
+ unsigned long gpfn;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
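
A sketch of the typical use of this call: asking Xen to place one of the domain's grant-table frames at a chosen GPFN so the guest can then map it. The HYPERVISOR_memory_op() wrapper is assumed as above; the example_* name is illustrative.

/* Illustrative only: make grant-table frame <idx> appear at <gpfn>. */
static int example_map_grant_frame(unsigned long idx, unsigned long gpfn)
{
        struct xen_add_to_physmap xatp = {
                .domid = DOMID_SELF,
                .space = XENMAPSPACE_grant_table,
                .idx   = idx,           /* which grant-table page */
                .gpfn  = gpfn,          /* where it should appear to us */
        };

        return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
}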
+
+/*
+ * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
+ * code on failure. This call only works for auto-translated guests.
+ */
+#define XENMEM_translate_gpfn_list 8
+struct xen_translate_gpfn_list {
+ /* Which domain to translate for? */
+ domid_t domid;
+
+ /* Length of list. */
+ unsigned long nr_gpfns;
+
+ /* List of GPFNs to translate. */
+ GUEST_HANDLE(ulong) gpfn_list;
+
+ /*
+ * Output list to contain MFN translations. May be the same as the input
+ * list (in which case each input GPFN is overwritten with the output MFN).
+ */
+ GUEST_HANDLE(ulong) mfn_list;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
+
+#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
new file mode 100644
index 00000000000..cd6939147cb
--- /dev/null
+++ b/include/xen/interface/physdev.h
@@ -0,0 +1,145 @@
+/*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_PHYSDEV_H__
+#define __XEN_PUBLIC_PHYSDEV_H__
+
+/*
+ * Prototype for this hypercall is:
+ * int physdev_op(int cmd, void *args)
+ * @cmd == PHYSDEVOP_??? (physdev operation).
+ * @args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Notify end-of-interrupt (EOI) for the specified IRQ.
+ * @arg == pointer to physdev_eoi structure.
+ */
+#define PHYSDEVOP_eoi 12
+struct physdev_eoi {
+ /* IN */
+ uint32_t irq;
+};
+
+/*
+ * Query the status of an IRQ line.
+ * @arg == pointer to physdev_irq_status_query structure.
+ */
+#define PHYSDEVOP_irq_status_query 5
+struct physdev_irq_status_query {
+ /* IN */
+ uint32_t irq;
+ /* OUT */
+ uint32_t flags; /* XENIRQSTAT_* */
+};
+
+/* Need to call PHYSDEVOP_eoi when the IRQ has been serviced? */
+#define _XENIRQSTAT_needs_eoi (0)
+#define XENIRQSTAT_needs_eoi (1U<<_XENIRQSTAT_needs_eoi)
+
+/* IRQ shared by multiple guests? */
+#define _XENIRQSTAT_shared (1)
+#define XENIRQSTAT_shared (1U<<_XENIRQSTAT_shared)
+
+/*
+ * Set the current VCPU's I/O privilege level.
+ * @arg == pointer to physdev_set_iopl structure.
+ */
+#define PHYSDEVOP_set_iopl 6
+struct physdev_set_iopl {
+ /* IN */
+ uint32_t iopl;
+};
+
+/*
+ * Set the current VCPU's I/O-port permissions bitmap.
+ * @arg == pointer to physdev_set_iobitmap structure.
+ */
+#define PHYSDEVOP_set_iobitmap 7
+struct physdev_set_iobitmap {
+ /* IN */
+ uint8_t * bitmap;
+ uint32_t nr_ports;
+};
+
+/*
+ * Read or write an IO-APIC register.
+ * @arg == pointer to physdev_apic structure.
+ */
+#define PHYSDEVOP_apic_read 8
+#define PHYSDEVOP_apic_write 9
+struct physdev_apic {
+ /* IN */
+ unsigned long apic_physbase;
+ uint32_t reg;
+ /* IN or OUT */
+ uint32_t value;
+};
+
+/*
+ * Allocate or free a physical upcall vector for the specified IRQ line.
+ * @arg == pointer to physdev_irq structure.
+ */
+#define PHYSDEVOP_alloc_irq_vector 10
+#define PHYSDEVOP_free_irq_vector 11
+struct physdev_irq {
+ /* IN */
+ uint32_t irq;
+ /* IN or OUT */
+ uint32_t vector;
+};
+
+/*
+ * Argument to physdev_op_compat() hypercall. Superseded by new physdev_op()
+ * hypercall since 0x00030202.
+ */
+struct physdev_op {
+ uint32_t cmd;
+ union {
+ struct physdev_irq_status_query irq_status_query;
+ struct physdev_set_iopl set_iopl;
+ struct physdev_set_iobitmap set_iobitmap;
+ struct physdev_apic apic_op;
+ struct physdev_irq irq_op;
+ } u;
+};
+
+/*
+ * Notify that some PIRQ-bound event channels have been unmasked.
+ * ** This command is obsolete since interface version 0x00030202 and is **
+ * ** unsupported by newer versions of Xen. **
+ */
+#define PHYSDEVOP_IRQ_UNMASK_NOTIFY 4
+
+/*
+ * These all-capitals physdev operation names are superseded by the new names
+ * (defined above) since interface version 0x00030202.
+ */
+#define PHYSDEVOP_IRQ_STATUS_QUERY PHYSDEVOP_irq_status_query
+#define PHYSDEVOP_SET_IOPL PHYSDEVOP_set_iopl
+#define PHYSDEVOP_SET_IOBITMAP PHYSDEVOP_set_iobitmap
+#define PHYSDEVOP_APIC_READ PHYSDEVOP_apic_read
+#define PHYSDEVOP_APIC_WRITE PHYSDEVOP_apic_write
+#define PHYSDEVOP_ASSIGN_VECTOR PHYSDEVOP_alloc_irq_vector
+#define PHYSDEVOP_FREE_VECTOR PHYSDEVOP_free_irq_vector
+#define PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY XENIRQSTAT_needs_eoi
+#define PHYSDEVOP_IRQ_SHARED XENIRQSTAT_shared
+
+#endif /* __XEN_PUBLIC_PHYSDEV_H__ */
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
new file mode 100644
index 00000000000..5fec575a800
--- /dev/null
+++ b/include/xen/interface/sched.h
@@ -0,0 +1,77 @@
+/******************************************************************************
+ * sched.h
+ *
+ * Scheduler state interactions
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_SCHED_H__
+#define __XEN_PUBLIC_SCHED_H__
+
+#include "event_channel.h"
+
+/*
+ * The prototype for this hypercall is:
+ * long sched_op_new(int cmd, void *arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == Operation-specific extra argument(s), as described below.
+ *
+ * **NOTE**:
+ * Versions of Xen prior to 3.0.2 provide only the following legacy version
+ * of this hypercall, supporting only the commands yield, block and shutdown:
+ * long sched_op(int cmd, unsigned long arg)
+ * @cmd == SCHEDOP_??? (scheduler operation).
+ * @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
+ * == SHUTDOWN_* code (SCHEDOP_shutdown)
+ */
+
+/*
+ * Voluntarily yield the CPU.
+ * @arg == NULL.
+ */
+#define SCHEDOP_yield 0
+
+/*
+ * Block execution of this VCPU until an event is received for processing.
+ * If called with event upcalls masked, this operation will atomically
+ * reenable event delivery and check for pending events before blocking the
+ * VCPU. This avoids a "wakeup waiting" race.
+ * @arg == NULL.
+ */
+#define SCHEDOP_block 1
+
+/*
+ * Halt execution of this domain (all VCPUs) and notify the system controller.
+ * @arg == pointer to sched_shutdown structure.
+ */
+#define SCHEDOP_shutdown 2
+struct sched_shutdown {
+ unsigned int reason; /* SHUTDOWN_* */
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll structure.
+ */
+#define SCHEDOP_poll 3
+struct sched_poll {
+ GUEST_HANDLE(evtchn_port_t) ports;
+ unsigned int nr_ports;
+ uint64_t timeout;
+};
+DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
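
A sketch of blocking on a single event channel with SCHEDOP_poll. Both the scheduler hypercall wrapper (called HYPERVISOR_sched_op() here) and the set_xen_guest_handle() helper are assumptions about plumbing provided elsewhere; the example_* name is illustrative.

/* Illustrative only: wait (with no timeout) until <port> is pending. */
static int example_poll_port(evtchn_port_t port)
{
        struct sched_poll poll = {
                .nr_ports = 1,
                .timeout  = 0,          /* 0 == wait indefinitely */
        };

        /* Assumed helper: points the GUEST_HANDLE field at our port. */
        set_xen_guest_handle(poll.ports, &port);

        return HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}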
+
+/*
+ * Reason codes for SCHEDOP_shutdown. These may be interpreted by control
+ * software to determine the appropriate action. For the most part, Xen does
+ * not care about the shutdown code.
+ */
+#define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
+#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
+#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
+#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+
+#endif /* __XEN_PUBLIC_SCHED_H__ */
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
new file mode 100644
index 00000000000..ff61ea36599
--- /dev/null
+++ b/include/xen/interface/vcpu.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * vcpu.h
+ *
+ * VCPU initialisation, query, and hotplug.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VCPU_H__
+#define __XEN_PUBLIC_VCPU_H__
+
+/*
+ * Prototype for this hypercall is:
+ * int vcpu_op(int cmd, int vcpuid, void *extra_args)
+ * @cmd == VCPUOP_??? (VCPU operation).
+ * @vcpuid == VCPU to operate on.
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+/*
+ * Initialise a VCPU. Each VCPU can be initialised only once. A
+ * newly-initialised VCPU will not run until it is brought up by VCPUOP_up.
+ *
+ * @extra_arg == pointer to vcpu_guest_context structure containing initial
+ * state for the VCPU.
+ */
+#define VCPUOP_initialise 0
+
+/*
+ * Bring up a VCPU. This makes the VCPU runnable. This operation will fail
+ * if the VCPU has not been initialised (VCPUOP_initialise).
+ */
+#define VCPUOP_up 1
+
+/*
+ * Bring down a VCPU (i.e., make it non-runnable).
+ * There are a few caveats that callers should observe:
+ * 1. This operation may return, and VCPU_is_up may return false, before the
+ * VCPU stops running (i.e., the command is asynchronous). It is a good
+ * idea to ensure that the VCPU has entered a non-critical loop before
+ * bringing it down. Alternatively, this operation is guaranteed
+ * synchronous if invoked by the VCPU itself.
+ * 2. After a VCPU is initialised, there is currently no way to drop all its
+ * references to domain memory. Even a VCPU that is down still holds
+ * memory references via its pagetable base pointer and GDT. It is good
+ * practice to move a VCPU onto an 'idle' or default page table, LDT and
+ * GDT before bringing it down.
+ */
+#define VCPUOP_down 2
+
+/* Returns 1 if the given VCPU is up. */
+#define VCPUOP_is_up 3
+
+/*
+ * Return information about the state and running time of a VCPU.
+ * @extra_arg == pointer to vcpu_runstate_info structure.
+ */
+#define VCPUOP_get_runstate_info 4
+struct vcpu_runstate_info {
+ /* VCPU's current state (RUNSTATE_*). */
+ int state;
+ /* When was current state entered (system time, ns)? */
+ uint64_t state_entry_time;
+ /*
+ * Time spent in each RUNSTATE_* (ns). The sum of these times is
+ * guaranteed not to drift from system time.
+ */
+ uint64_t time[4];
+};
+
+/* VCPU is currently running on a physical CPU. */
+#define RUNSTATE_running 0
+
+/* VCPU is runnable, but not currently scheduled on any physical CPU. */
+#define RUNSTATE_runnable 1
+
+/* VCPU is blocked (a.k.a. idle). It is therefore not runnable. */
+#define RUNSTATE_blocked 2
+
+/*
+ * VCPU is not runnable, but it is not blocked.
+ * This is a 'catch all' state for things like hotplug and pauses by the
+ * system administrator (or for critical sections in the hypervisor).
+ * RUNSTATE_blocked dominates this state (it is the preferred state).
+ */
+#define RUNSTATE_offline 3
+
+/*
+ * Register a shared memory area from which the guest may obtain its own
+ * runstate information without needing to execute a hypercall.
+ * Notes:
+ * 1. The registered address may be virtual or physical, depending on the
+ * platform. The virtual address should be registered on x86 systems.
+ * 2. Only one shared area may be registered per VCPU. The shared area is
+ * updated by the hypervisor each time the VCPU is scheduled. Thus
+ * runstate.state will always be RUNSTATE_running and
+ * runstate.state_entry_time will indicate the system time at which the
+ * VCPU was last scheduled to run.
+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
+ */
+#define VCPUOP_register_runstate_memory_area 5
+struct vcpu_register_runstate_memory_area {
+ union {
+ struct vcpu_runstate_info *v;
+ uint64_t p;
+ } addr;
+};
+
+/*
+ * Set or stop a VCPU's periodic timer. Every VCPU has one periodic timer
+ * which can be set via these commands. Periods smaller than one millisecond
+ * may not be supported.
+ */
+#define VCPUOP_set_periodic_timer 6 /* arg == vcpu_set_periodic_timer_t */
+#define VCPUOP_stop_periodic_timer 7 /* arg == NULL */
+struct vcpu_set_periodic_timer {
+ uint64_t period_ns;
+};
+
+/*
+ * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
+ * timer which can be set via these commands.
+ */
+#define VCPUOP_set_singleshot_timer 8 /* arg == vcpu_set_singleshot_timer_t */
+#define VCPUOP_stop_singleshot_timer 9 /* arg == NULL */
+struct vcpu_set_singleshot_timer {
+ uint64_t timeout_abs_ns;
+ uint32_t flags; /* VCPU_SSHOTTMR_??? */
+};
+
+/* Flags to VCPUOP_set_singleshot_timer. */
+ /* Require the timeout to be in the future (return -ETIME if it's passed). */
+#define _VCPU_SSHOTTMR_future (0)
+#define VCPU_SSHOTTMR_future (1U << _VCPU_SSHOTTMR_future)
+
+/*
+ * Register a memory location in the guest address space for the
+ * vcpu_info structure. This allows the guest to place the vcpu_info
+ * structure in a convenient place, such as in a per-cpu data area.
+ * The pointer need not be page aligned, but the structure must not
+ * cross a page boundary.
+ */
+#define VCPUOP_register_vcpu_info 10 /* arg == struct vcpu_info */
+struct vcpu_register_vcpu_info {
+ uint32_t mfn; /* mfn of page to place vcpu_info */
+ uint32_t offset; /* offset within page */
+};
+
+#endif /* __XEN_PUBLIC_VCPU_H__ */
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
new file mode 100644
index 00000000000..453235e923f
--- /dev/null
+++ b/include/xen/interface/version.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ * version.h
+ *
+ * Xen version, type, and compile information.
+ *
+ * Copyright (c) 2005, Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_VERSION_H__
+#define __XEN_PUBLIC_VERSION_H__
+
+/* NB. All ops return zero on success, except XENVER_version. */
+
+/* arg == NULL; returns major:minor (16:16). */
+#define XENVER_version 0
+
+/* arg == xen_extraversion_t. */
+#define XENVER_extraversion 1
+struct xen_extraversion {
+ char extraversion[16];
+};
+#define XEN_EXTRAVERSION_LEN (sizeof(struct xen_extraversion))
+
+/* arg == xen_compile_info_t. */
+#define XENVER_compile_info 2
+struct xen_compile_info {
+ char compiler[64];
+ char compile_by[16];
+ char compile_domain[32];
+ char compile_date[32];
+};
+
+#define XENVER_capabilities 3
+struct xen_capabilities_info {
+ char info[1024];
+};
+#define XEN_CAPABILITIES_INFO_LEN (sizeof(struct xen_capabilities_info))
+
+#define XENVER_changeset 4
+struct xen_changeset_info {
+ char info[64];
+};
+#define XEN_CHANGESET_INFO_LEN (sizeof(struct xen_changeset_info))
+
+#define XENVER_platform_parameters 5
+struct xen_platform_parameters {
+ unsigned long virt_start;
+};
+
+#define XENVER_get_features 6
+struct xen_feature_info {
+ unsigned int submap_idx; /* IN: which 32-bit submap to return */
+ uint32_t submap; /* OUT: 32-bit submap */
+};
+
+/* Declares the features reported by XENVER_get_features. */
+#include "features.h"
+
+#endif /* __XEN_PUBLIC_VERSION_H__ */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
new file mode 100644
index 00000000000..518a5bf79ed
--- /dev/null
+++ b/include/xen/interface/xen.h
@@ -0,0 +1,447 @@
+/******************************************************************************
+ * xen.h
+ *
+ * Guest OS interface to Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef __XEN_PUBLIC_XEN_H__
+#define __XEN_PUBLIC_XEN_H__
+
+#include <asm/xen/interface.h>
+
+/*
+ * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS).
+ */
+
+/*
+ * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5.
+ * EAX = return value
+ * (argument registers may be clobbered on return)
+ * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6.
+ * RAX = return value
+ * (argument registers not clobbered on return; RCX, R11 are)
+ */
+#define __HYPERVISOR_set_trap_table 0
+#define __HYPERVISOR_mmu_update 1
+#define __HYPERVISOR_set_gdt 2
+#define __HYPERVISOR_stack_switch 3
+#define __HYPERVISOR_set_callbacks 4
+#define __HYPERVISOR_fpu_taskswitch 5
+#define __HYPERVISOR_sched_op 6
+#define __HYPERVISOR_dom0_op 7
+#define __HYPERVISOR_set_debugreg 8
+#define __HYPERVISOR_get_debugreg 9
+#define __HYPERVISOR_update_descriptor 10
+#define __HYPERVISOR_memory_op 12
+#define __HYPERVISOR_multicall 13
+#define __HYPERVISOR_update_va_mapping 14
+#define __HYPERVISOR_set_timer_op 15
+#define __HYPERVISOR_event_channel_op_compat 16
+#define __HYPERVISOR_xen_version 17
+#define __HYPERVISOR_console_io 18
+#define __HYPERVISOR_physdev_op_compat 19
+#define __HYPERVISOR_grant_table_op 20
+#define __HYPERVISOR_vm_assist 21
+#define __HYPERVISOR_update_va_mapping_otherdomain 22
+#define __HYPERVISOR_iret 23 /* x86 only */
+#define __HYPERVISOR_vcpu_op 24
+#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
+#define __HYPERVISOR_mmuext_op 26
+#define __HYPERVISOR_acm_op 27
+#define __HYPERVISOR_nmi_op 28
+#define __HYPERVISOR_sched_op_new 29
+#define __HYPERVISOR_callback_op 30
+#define __HYPERVISOR_xenoprof_op 31
+#define __HYPERVISOR_event_channel_op 32
+#define __HYPERVISOR_physdev_op 33
+#define __HYPERVISOR_hvm_op 34
+
+/*
+ * VIRTUAL INTERRUPTS
+ *
+ * Virtual interrupts that a guest OS may receive from Xen.
+ */
+#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
+#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
+#define NR_VIRQS 8
+
+/*
+ * MMU-UPDATE REQUESTS
+ *
+ * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ * ptr[1:0] specifies the appropriate MMU_* command.
+ *
+ * ptr[1:0] == MMU_NORMAL_PT_UPDATE:
+ * Updates an entry in a page table. If updating an L1 table, and the new
+ * table entry is valid/present, the mapped frame must belong to the FD, if
+ * an FD has been specified. If attempting to map an I/O page then the
+ * caller assumes the privilege of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
+ * ptr[:2] -- Machine address of the page-table entry to modify.
+ * val -- Value to write.
+ *
+ * ptr[1:0] == MMU_MACHPHYS_UPDATE:
+ * Updates an entry in the machine->pseudo-physical mapping table.
+ * ptr[:2] -- Machine address within the frame whose mapping to modify.
+ * The frame must belong to the FD, if one is specified.
+ * val -- Value to write into the mapping entry.
+ */
+#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
+#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
+
+/*
+ * MMU EXTENDED OPERATIONS
+ *
+ * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * A foreigndom (FD) can be specified (or DOMID_SELF for none).
+ * Where the FD has some effect, it is described below.
+ *
+ * cmd: MMUEXT_(UN)PIN_*_TABLE
+ * mfn: Machine frame number to be (un)pinned as a p.t. page.
+ * The frame must belong to the FD, if one is specified.
+ *
+ * cmd: MMUEXT_NEW_BASEPTR
+ * mfn: Machine frame number of new page-table base to install in MMU.
+ *
+ * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
+ * mfn: Machine frame number of new page-table base to install in MMU
+ * when in user space.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_LOCAL
+ * No additional arguments. Flushes local TLB.
+ *
+ * cmd: MMUEXT_INVLPG_LOCAL
+ * linear_addr: Linear address to be flushed from the local TLB.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_MULTI
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_INVLPG_MULTI
+ * linear_addr: Linear address to be flushed.
+ * vcpumask: Pointer to bitmap of VCPUs to be flushed.
+ *
+ * cmd: MMUEXT_TLB_FLUSH_ALL
+ * No additional arguments. Flushes all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_INVLPG_ALL
+ * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
+ *
+ * cmd: MMUEXT_FLUSH_CACHE
+ * No additional arguments. Writes back and flushes cache contents.
+ *
+ * cmd: MMUEXT_SET_LDT
+ * linear_addr: Linear address of LDT base (NB. must be page-aligned).
+ * nr_ents: Number of entries in LDT.
+ */
+#define MMUEXT_PIN_L1_TABLE 0
+#define MMUEXT_PIN_L2_TABLE 1
+#define MMUEXT_PIN_L3_TABLE 2
+#define MMUEXT_PIN_L4_TABLE 3
+#define MMUEXT_UNPIN_TABLE 4
+#define MMUEXT_NEW_BASEPTR 5
+#define MMUEXT_TLB_FLUSH_LOCAL 6
+#define MMUEXT_INVLPG_LOCAL 7
+#define MMUEXT_TLB_FLUSH_MULTI 8
+#define MMUEXT_INVLPG_MULTI 9
+#define MMUEXT_TLB_FLUSH_ALL 10
+#define MMUEXT_INVLPG_ALL 11
+#define MMUEXT_FLUSH_CACHE 12
+#define MMUEXT_SET_LDT 13
+#define MMUEXT_NEW_USER_BASEPTR 15
+
+#ifndef __ASSEMBLY__
+struct mmuext_op {
+ unsigned int cmd;
+ union {
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+ unsigned long mfn;
+ /* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
+ unsigned long linear_addr;
+ } arg1;
+ union {
+ /* SET_LDT */
+ unsigned int nr_ents;
+ /* TLB_FLUSH_MULTI, INVLPG_MULTI */
+ void *vcpumask;
+ } arg2;
+};
+DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
+#endif
+
+/* These are passed as 'flags' to update_va_mapping. They can be ORed. */
+/* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap. */
+/* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer. */
+#define UVMF_NONE (0UL<<0) /* No flushing at all. */
+#define UVMF_TLB_FLUSH (1UL<<0) /* Flush entire TLB(s). */
+#define UVMF_INVLPG (2UL<<0) /* Flush only one entry. */
+#define UVMF_FLUSHTYPE_MASK (3UL<<0)
+#define UVMF_MULTI (0UL<<2) /* Flush subset of TLBs. */
+#define UVMF_LOCAL (0UL<<2) /* Flush local TLB. */
+#define UVMF_ALL (1UL<<2) /* Flush all TLBs. */
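A hedged sketch of how these flags combine, assuming the conventional HYPERVISOR_update_va_mapping(va, new_pte, flags) wrapper: when UVMF_MULTI is requested, the vcpumask pointer is simply ORed into the flags word, as the comment above describes.

/* Sketch only: flush one entry on the VCPUs named in 'vcpu_bitmap'. */
static void example_update_va(unsigned long va, pte_t new_pte,
			      const unsigned long *vcpu_bitmap)
{
	unsigned long flags = UVMF_INVLPG | UVMF_MULTI |
			      (unsigned long)vcpu_bitmap;

	if (HYPERVISOR_update_va_mapping(va, new_pte, flags))
		BUG();
}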
+
+/*
+ * Commands to HYPERVISOR_console_io().
+ */
+#define CONSOLEIO_write 0
+#define CONSOLEIO_read 1
+
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable 0
+#define VMASST_CMD_disable 1
+#define VMASST_TYPE_4gb_segments 0
+#define VMASST_TYPE_4gb_segments_notify 1
+#define VMASST_TYPE_writable_pagetables 2
+#define VMASST_TYPE_pae_extended_cr3 3
+#define MAX_VMASST_TYPE 3
+
+#ifndef __ASSEMBLY__
+
+typedef uint16_t domid_t;
+
+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
+#define DOMID_FIRST_RESERVED (0x7FF0U)
+
+/* DOMID_SELF is used in certain contexts to refer to oneself. */
+#define DOMID_SELF (0x7FF0U)
+
+/*
+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
+ * is useful to ensure that no mappings to the OS's own heap are accidentally
+ * installed. (e.g., in Linux this could cause havoc as reference counts
+ * aren't adjusted on the I/O-mapping code path).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
+ * be specified by any calling domain.
+ */
+#define DOMID_IO (0x7FF1U)
+
+/*
+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
+ * Xen's heap space (e.g., the machine_to_phys table).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
+ * the caller is privileged.
+ */
+#define DOMID_XEN (0x7FF2U)
+
+/*
+ * Send an array of these to HYPERVISOR_mmu_update().
+ * NB. The fields are natural pointer/address size for this architecture.
+ */
+struct mmu_update {
+ uint64_t ptr; /* Machine address of PTE. */
+ uint64_t val; /* New contents of PTE. */
+};
+DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
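For illustration only (not from this patch): a single page-table-entry write batched through HYPERVISOR_mmu_update(), with the command encoded in the low two bits of ptr as described earlier. The wrapper signature (req, count, success_count, domid) is assumed.

static void example_write_pte(uint64_t pte_machine_addr, uint64_t pte_val)
{
	struct mmu_update u;

	u.ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE;	/* ptr[1:0] = cmd */
	u.val = pte_val;

	if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
		BUG();
}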
+
+/*
+ * Send an array of these to HYPERVISOR_multicall().
+ * NB. The fields are natural register size for this architecture.
+ */
+struct multicall_entry {
+ unsigned long op;
+ long result;
+ unsigned long args[6];
+};
+DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
+
+/*
+ * Event channel endpoints per domain:
+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
+
+struct vcpu_time_info {
+ /*
+ * Updates to the following values are preceded and followed
+ * by an increment of 'version'. The guest can therefore
+ * detect updates by looking for changes to 'version'. If the
+ * least-significant bit of the version number is set then an
+ * update is in progress and the guest must wait to read a
+ * consistent set of values. The correct way to interact with
+ * the version number is similar to Linux's seqlock: see the
+ * implementations of read_seqbegin/read_seqretry.
+ */
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp; /* TSC at last update of time vals. */
+ uint64_t system_time; /* Time, in nanosecs, since boot. */
+ /*
+ * Current system time:
+ * system_time + ((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul
+ * CPU frequency (Hz):
+ * ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
+ */
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+ int8_t pad1[3];
+}; /* 32 bytes */
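A hedged sketch of the seqlock-style read described above. Note that tsc_to_system_mul is a 32.32 fixed-point multiplier (as the CPU-frequency formula implies), so the scaled delta is shifted right by 32; a production implementation uses a full 64x32-bit multiply to avoid overflow, and rdtscll() is assumed here as the raw TSC read.

static u64 example_read_system_time(const struct vcpu_time_info *t)
{
	u32 ver, mul;
	s8 shift;
	u64 base, tsc, delta;

	do {
		ver = t->version;
		rmb();			/* read version before the payload */
		base  = t->system_time;
		mul   = t->tsc_to_system_mul;
		shift = t->tsc_shift;
		rdtscll(tsc);
		delta = tsc - t->tsc_timestamp;
		rmb();			/* payload before version re-check */
	} while ((ver & 1) || ver != t->version);

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	return base + ((delta * mul) >> 32);	/* 64-bit product; sketch only */
}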
+
+struct vcpu_info {
+ /*
+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+ * a pending notification for a particular VCPU. It is then cleared
+ * by the guest OS /before/ checking for pending work, thus avoiding
+ * a set-and-check race. Note that the mask is only accessed by Xen
+ * on the CPU that is currently hosting the VCPU. This means that the
+ * pending and mask flags can be updated by the guest without special
+ * synchronisation (i.e., no need for the x86 LOCK prefix).
+ * This may seem suboptimal because if the pending flag is set by
+ * a different CPU then an IPI may be scheduled even when the mask
+ * is set. However, note:
+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
+ * channel mask bits. A 'noisy' event that is continually being
+ * triggered can be masked at source at this very precise
+ * granularity.
+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
+ * reentrant execution: whether for concurrency control, or to
+ * prevent unbounded stack usage. Whatever the purpose, we expect
+ * that the mask will be asserted only for short periods at a time,
+ * and so the likelihood of a 'spurious' IPI is suitably small.
+ * The mask is read before making an event upcall to the guest: a
+ * non-zero mask therefore guarantees that the VCPU will not receive
+ * an upcall activation. The mask is cleared when the VCPU requests
+ * to block: this avoids wakeup-waiting races.
+ */
+ uint8_t evtchn_upcall_pending;
+ uint8_t evtchn_upcall_mask;
+ unsigned long evtchn_pending_sel;
+ struct arch_vcpu_info arch;
+ struct vcpu_time_info time;
+}; /* 64 bytes (x86) */
+
+/*
+ * Xen/kernel shared data -- pointer provided in start_info.
+ * NB. We expect that this struct is smaller than a page.
+ */
+struct shared_info {
+ struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
+
+ /*
+ * A domain can create "event channels" on which it can send and receive
+ * asynchronous event notifications. There are three classes of event that
+ * are delivered by this mechanism:
+ * 1. Bi-directional inter- and intra-domain connections. Domains must
+ * arrange out-of-band to set up a connection (usually by allocating
+ * an unbound 'listener' port and advertising that via a storage service
+ * such as xenstore).
+ * 2. Physical interrupts. A domain with suitable hardware-access
+ * privileges can bind an event-channel port to a physical interrupt
+ * source.
+ * 3. Virtual interrupts ('events'). A domain can bind an event-channel
+ * port to a virtual interrupt source, such as the virtual-timer
+ * device or the emergency console.
+ *
+ * Event channels are addressed by a "port index". Each channel is
+ * associated with two bits of information:
+ * 1. PENDING -- notifies the domain that there is a pending notification
+ * to be processed. This bit is cleared by the guest.
+ * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
+ * will cause an asynchronous upcall to be scheduled. This bit is only
+ * updated by the guest. It is read-only within Xen. If a channel
+ * becomes pending while the channel is masked then the 'edge' is lost
+ * (i.e., when the channel is unmasked, the guest must manually handle
+ * pending notifications as no upcall will be scheduled by Xen).
+ *
+ * To expedite scanning of pending notifications, any 0->1 pending
+ * transition on an unmasked channel causes a corresponding bit in a
+ * per-vcpu selector word to be set. Each bit in the selector covers a
+ * 'C long' in the PENDING bitfield array.
+ */
+ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
+ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
+
+ /*
+ * Wallclock time: updated only by control software. Guests should base
+ * their gettimeofday() syscall on this wallclock-base value.
+ */
+ uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
+ uint32_t wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
+ uint32_t wc_nsec; /* Nsecs 00:00:00 UTC, Jan 1, 1970. */
+
+ struct arch_shared_info arch;
+
+};
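To make the two-level pending scheme concrete, here is a hedged sketch of how a guest's upcall handler might walk it for one VCPU: xchg() clears the selector word atomically, and each set selector bit names a 'C long' worth of ports. A real guest clears pending bits with a locked/sync bit operation, since Xen also writes these words.

static void example_scan_pending(struct shared_info *s, unsigned int cpu)
{
	struct vcpu_info *v = &s->vcpu_info[cpu];
	unsigned long sel, pending;
	unsigned int word, bit, port;

	sel = xchg(&v->evtchn_pending_sel, 0);
	while (sel) {
		word = __ffs(sel);
		sel &= ~(1UL << word);

		pending = s->evtchn_pending[word] & ~s->evtchn_mask[word];
		while (pending) {
			bit = __ffs(pending);
			pending &= ~(1UL << bit);

			port = word * BITS_PER_LONG + bit;
			clear_bit(bit, &s->evtchn_pending[word]);
			/* ...dispatch the handler bound to 'port'... */
		}
	}
}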
+
+/*
+ * Start-of-day memory layout for the initial domain (DOM0):
+ * 1. The domain is started within a contiguous virtual-memory region.
+ * 2. The contiguous region begins and ends on an aligned 4MB boundary.
+ * 3. The region start corresponds to the load address of the OS image.
+ * If the load address is not 4MB aligned then the address is rounded down.
+ * 4. This is the order of bootstrap elements in the initial virtual region:
+ * a. relocated kernel image
+ * b. initial ram disk [mod_start, mod_len]
+ * c. list of allocated page frames [mfn_list, nr_pages]
+ * d. start_info_t structure [register ESI (x86)]
+ * e. bootstrap page tables [pt_base, CR3 (x86)]
+ * f. bootstrap stack [register ESP (x86)]
+ * 5. Bootstrap elements are packed together, but each is 4kB-aligned.
+ * 6. The initial ram disk may be omitted.
+ * 7. The list of page frames forms a contiguous 'pseudo-physical' memory
+ * layout for the domain. In particular, the bootstrap virtual-memory
+ * region is a 1:1 mapping to the first section of the pseudo-physical map.
+ * 8. All bootstrap elements are mapped read-writable for the guest OS. The
+ * only exception is the bootstrap page table, which is mapped read-only.
+ * 9. There is guaranteed to be at least 512kB padding after the final
+ * bootstrap element. If necessary, the bootstrap virtual region is
+ * extended by an extra 4MB to ensure this.
+ */
+
+#define MAX_GUEST_CMDLINE 1024
+struct start_info {
+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
+ char magic[32]; /* "xen-<version>-<platform>". */
+ unsigned long nr_pages; /* Total pages allocated to this domain. */
+ unsigned long shared_info; /* MACHINE address of shared info struct. */
+ uint32_t flags; /* SIF_xxx flags. */
+ unsigned long store_mfn; /* MACHINE page number of shared page. */
+ uint32_t store_evtchn; /* Event channel for store communication. */
+ union {
+ struct {
+ unsigned long mfn; /* MACHINE page number of console page. */
+ uint32_t evtchn; /* Event channel for console page. */
+ } domU;
+ struct {
+ uint32_t info_off; /* Offset of console_info struct. */
+ uint32_t info_size; /* Size of console_info struct from start.*/
+ } dom0;
+ } console;
+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
+ unsigned long pt_base; /* VIRTUAL address of page directory. */
+ unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */
+ unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+ int8_t cmd_line[MAX_GUEST_CMDLINE];
+};
+
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+
+typedef uint64_t cpumap_t;
+
+typedef uint8_t xen_domain_handle_t[16];
+
+/* Turn a plain number into a C unsigned long constant. */
+#define __mk_unsigned_long(x) x ## UL
+#define mk_unsigned_long(x) __mk_unsigned_long(x)
+
+#else /* __ASSEMBLY__ */
+
+/* In assembly code we cannot use C numeric constant suffixes. */
+#define mk_unsigned_long(x) x
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __XEN_PUBLIC_XEN_H__ */
diff --git a/include/xen/page.h b/include/xen/page.h
new file mode 100644
index 00000000000..1df6c193057
--- /dev/null
+++ b/include/xen/page.h
@@ -0,0 +1,179 @@
+#ifndef __XEN_PAGE_H
+#define __XEN_PAGE_H
+
+#include <linux/pfn.h>
+
+#include <asm/uaccess.h>
+
+#include <xen/features.h>
+
+#ifdef CONFIG_X86_PAE
+/* Xen machine address */
+typedef struct xmaddr {
+ unsigned long long maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+ unsigned long long paddr;
+} xpaddr_t;
+#else
+/* Xen machine address */
+typedef struct xmaddr {
+ unsigned long maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+ unsigned long paddr;
+} xpaddr_t;
+#endif
+
+#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME_BIT (1UL<<31)
+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
+
+extern unsigned long *phys_to_machine_mapping;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return pfn;
+
+ return phys_to_machine_mapping[(unsigned int)(pfn)] &
+ ~FOREIGN_FRAME_BIT;
+}
+
+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 1;
+
+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+#if 0
+ if (unlikely((mfn >> machine_to_phys_order) != 0))
+ return max_mapnr;
+#endif
+
+ pfn = 0;
+ /*
+ * The array access can fail (e.g., device space beyond end of RAM).
+ * In such cases it doesn't matter what we return (we return garbage),
+ * but we must handle the fault without crashing!
+ */
+ __get_user(pfn, &machine_to_phys_mapping[mfn]);
+
+ return pfn;
+}
+
+static inline xmaddr_t phys_to_machine(xpaddr_t phys)
+{
+ unsigned offset = phys.paddr & ~PAGE_MASK;
+ return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+}
+
+static inline xpaddr_t machine_to_phys(xmaddr_t machine)
+{
+ unsigned offset = machine.maddr & ~PAGE_MASK;
+ return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+}
+
+/*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
+ */
+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+{
+ extern unsigned long max_mapnr;
+ unsigned long pfn = mfn_to_pfn(mfn);
+ if ((pfn < max_mapnr)
+ && !xen_feature(XENFEAT_auto_translated_physmap)
+ && (phys_to_machine_mapping[pfn] != mfn))
+ return max_mapnr; /* force !pfn_valid() */
+ return pfn;
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+ return;
+ }
+ phys_to_machine_mapping[pfn] = mfn;
+}
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
+#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
+#ifdef CONFIG_X86_PAE
+#define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
+ (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)))
+
+static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+
+ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) |
+ (pgprot_val(pgprot) >> 32);
+ pte.pte_high &= (__supported_pte_mask >> 32);
+ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
+ pte.pte_low &= __supported_pte_mask;
+
+ return pte;
+}
+
+static inline unsigned long long pte_val_ma(pte_t x)
+{
+ return ((unsigned long long)x.pte_high << 32) | x.pte_low;
+}
+#define pmd_val_ma(v) ((v).pmd)
+#define pud_val_ma(v) ((v).pgd.pgd)
+#define __pte_ma(x) ((pte_t) { .pte_low = (x), .pte_high = (x)>>32 } )
+#define __pmd_ma(x) ((pmd_t) { (x) } )
+#else /* !X86_PAE */
+#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
+#define mfn_pte(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_val_ma(x) ((x).pte_low)
+#define pmd_val_ma(v) ((v).pud.pgd.pgd)
+#define __pte_ma(x) ((pte_t) { (x) } )
+#endif /* CONFIG_X86_PAE */
+
+#define pgd_val_ma(x) ((x).pgd)
+
+
+xmaddr_t arbitrary_virt_to_machine(unsigned long address);
+void make_lowmem_page_readonly(void *vaddr);
+void make_lowmem_page_readwrite(void *vaddr);
+
+#endif /* __XEN_PAGE_H */
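A small, hedged example of the conversion macros in use (not part of this header): obtaining the machine frame number and full machine address of a kernel page, e.g. before handing it to the hypervisor.

static void example_report_page(void *page_kva)
{
	unsigned long mfn = virt_to_mfn(page_kva);
	xmaddr_t maddr = virt_to_machine(page_kva);

	printk(KERN_DEBUG "kva %p -> mfn %lx, machine addr %llx\n",
	       page_kva, mfn, (unsigned long long)maddr.maddr);
}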
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
new file mode 100644
index 00000000000..6f7c290651a
--- /dev/null
+++ b/include/xen/xenbus.h
@@ -0,0 +1,234 @@
+/******************************************************************************
+ * xenbus.h
+ *
+ * Talks to Xen Store to figure out what devices we have.
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 XenSource Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_XENBUS_H
+#define _XEN_XENBUS_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <xen/interface/xen.h>
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/xenbus.h>
+#include <xen/interface/io/xs_wire.h>
+
+/* Register callback to watch this node. */
+struct xenbus_watch
+{
+ struct list_head list;
+
+ /* Path being watched. */
+ const char *node;
+
+ /* Callback (executed in a process context with no locks held). */
+ void (*callback)(struct xenbus_watch *,
+ const char **vec, unsigned int len);
+};
+
+
+/* A xenbus device. */
+struct xenbus_device {
+ const char *devicetype;
+ const char *nodename;
+ const char *otherend;
+ int otherend_id;
+ struct xenbus_watch otherend_watch;
+ struct device dev;
+ enum xenbus_state state;
+ struct completion down;
+};
+
+static inline struct xenbus_device *to_xenbus_device(struct device *dev)
+{
+ return container_of(dev, struct xenbus_device, dev);
+}
+
+struct xenbus_device_id
+{
+ /* .../device/<device_type>/<identifier> */
+ char devicetype[32]; /* General class of device. */
+};
+
+/* A xenbus driver. */
+struct xenbus_driver {
+ char *name;
+ struct module *owner;
+ const struct xenbus_device_id *ids;
+ int (*probe)(struct xenbus_device *dev,
+ const struct xenbus_device_id *id);
+ void (*otherend_changed)(struct xenbus_device *dev,
+ enum xenbus_state backend_state);
+ int (*remove)(struct xenbus_device *dev);
+ int (*suspend)(struct xenbus_device *dev);
+ int (*suspend_cancel)(struct xenbus_device *dev);
+ int (*resume)(struct xenbus_device *dev);
+ int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+ struct device_driver driver;
+ int (*read_otherend_details)(struct xenbus_device *dev);
+};
+
+static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct xenbus_driver, driver);
+}
+
+int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+
+static inline int __must_check
+xenbus_register_frontend(struct xenbus_driver *drv)
+{
+ WARN_ON(drv->owner != THIS_MODULE);
+ return __xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME);
+}
+
+int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+static inline int __must_check
+xenbus_register_backend(struct xenbus_driver *drv)
+{
+ WARN_ON(drv->owner != THIS_MODULE);
+ return __xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME);
+}
+
+void xenbus_unregister_driver(struct xenbus_driver *drv);
+
+struct xenbus_transaction
+{
+ u32 id;
+};
+
+/* Nil transaction ID. */
+#define XBT_NIL ((struct xenbus_transaction) { 0 })
+
+int __init xenbus_dev_init(void);
+
+char **xenbus_directory(struct xenbus_transaction t,
+ const char *dir, const char *node, unsigned int *num);
+void *xenbus_read(struct xenbus_transaction t,
+ const char *dir, const char *node, unsigned int *len);
+int xenbus_write(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *string);
+int xenbus_mkdir(struct xenbus_transaction t,
+ const char *dir, const char *node);
+int xenbus_exists(struct xenbus_transaction t,
+ const char *dir, const char *node);
+int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);
+int xenbus_transaction_start(struct xenbus_transaction *t);
+int xenbus_transaction_end(struct xenbus_transaction t, int abort);
+
+/* Single read and scanf: returns -errno or num scanned if > 0. */
+int xenbus_scanf(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *fmt, ...)
+ __attribute__((format(scanf, 4, 5)));
+
+/* Single printf and write: returns -errno or 0. */
+int xenbus_printf(struct xenbus_transaction t,
+ const char *dir, const char *node, const char *fmt, ...)
+ __attribute__((format(printf, 4, 5)));
+
+/* Generic read function: NULL-terminated triples of name,
+ * sprintf-style type string, and pointer. Returns 0 or errno.*/
+int xenbus_gather(struct xenbus_transaction t, const char *dir, ...);
+
+/* notifier routines for when the xenstore comes up */
+extern int xenstored_ready;
+int register_xenstore_notifier(struct notifier_block *nb);
+void unregister_xenstore_notifier(struct notifier_block *nb);
+
+int register_xenbus_watch(struct xenbus_watch *watch);
+void unregister_xenbus_watch(struct xenbus_watch *watch);
+void xs_suspend(void);
+void xs_resume(void);
+void xs_suspend_cancel(void);
+
+/* Used by xenbus_dev to borrow kernel's store connection. */
+void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
+
+struct work_struct;
+
+/* Prepare for domain suspend: then resume or cancel the suspend. */
+void xenbus_suspend(void);
+void xenbus_resume(void);
+void xenbus_probe(struct work_struct *);
+void xenbus_suspend_cancel(void);
+
+#define XENBUS_IS_ERR_READ(str) ({ \
+ if (!IS_ERR(str) && strlen(str) == 0) { \
+ kfree(str); \
+ str = ERR_PTR(-ERANGE); \
+ } \
+ IS_ERR(str); \
+})
+
+#define XENBUS_EXIST_ERR(err) ((err) == -ENOENT || (err) == -ERANGE)
+
+int xenbus_watch_path(struct xenbus_device *dev, const char *path,
+ struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int));
+int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
+ void (*callback)(struct xenbus_watch *,
+ const char **, unsigned int),
+ const char *pathfmt, ...)
+ __attribute__ ((format (printf, 4, 5)));
+
+int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
+int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
+int xenbus_map_ring_valloc(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr);
+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
+ grant_handle_t *handle, void *vaddr);
+
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
+int xenbus_unmap_ring(struct xenbus_device *dev,
+ grant_handle_t handle, void *vaddr);
+
+int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port);
+int xenbus_free_evtchn(struct xenbus_device *dev, int port);
+
+enum xenbus_state xenbus_read_driver_state(const char *path);
+
+void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
+void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
+
+const char *xenbus_strstate(enum xenbus_state state);
+int xenbus_dev_is_online(struct xenbus_device *dev);
+int xenbus_frontend_closed(struct xenbus_device *dev);
+
+#endif /* _XEN_XENBUS_H */
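For orientation, a hedged skeleton of a frontend built on this API; the device type and xenstore key names below are made up for illustration, but the shape mirrors the block and net frontends added elsewhere in this series.

#include <linux/module.h>
#include <xen/xenbus.h>

static int example_probe(struct xenbus_device *dev,
			 const struct xenbus_device_id *id)
{
	unsigned int ring_ref;
	int err;

	/* read a backend-provided key; "ring-ref" is illustrative */
	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err < 0)
		return err;

	return xenbus_switch_state(dev, XenbusStateInitialised);
}

static void example_otherend_changed(struct xenbus_device *dev,
				     enum xenbus_state backend_state)
{
	if (backend_state == XenbusStateClosed)
		xenbus_frontend_closed(dev);
}

static struct xenbus_device_id example_ids[] = {
	{ "example" },
	{ "" }
};

static struct xenbus_driver example_driver = {
	.name			= "example",
	.owner			= THIS_MODULE,
	.ids			= example_ids,
	.probe			= example_probe,
	.otherend_changed	= example_otherend_changed,
};

static int __init example_init(void)
{
	return xenbus_register_frontend(&example_driver);
}
module_init(example_init);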
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b4796d85014..57e6448b171 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -516,7 +516,7 @@ static void cpuset_release_agent(const char *pathbuf)
envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[i] = NULL;
- call_usermodehelper(argv[0], argv, envp, 0);
+ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
kfree(pathbuf);
}
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 4d32eb07717..78d365c524e 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -119,9 +119,10 @@ struct subprocess_info {
char **argv;
char **envp;
struct key *ring;
- int wait;
+ enum umh_wait wait;
int retval;
struct file *stdin;
+ void (*cleanup)(char **argv, char **envp);
};
/*
@@ -180,6 +181,14 @@ static int ____call_usermodehelper(void *data)
do_exit(0);
}
+void call_usermodehelper_freeinfo(struct subprocess_info *info)
+{
+ if (info->cleanup)
+ (*info->cleanup)(info->argv, info->envp);
+ kfree(info);
+}
+EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
@@ -216,8 +225,8 @@ static int wait_for_helper(void *data)
sub_info->retval = ret;
}
- if (sub_info->wait < 0)
- kfree(sub_info);
+ if (sub_info->wait == UMH_NO_WAIT)
+ call_usermodehelper_freeinfo(sub_info);
else
complete(sub_info->complete);
return 0;
@@ -229,34 +238,122 @@ static void __call_usermodehelper(struct work_struct *work)
struct subprocess_info *sub_info =
container_of(work, struct subprocess_info, work);
pid_t pid;
- int wait = sub_info->wait;
+ enum umh_wait wait = sub_info->wait;
/* CLONE_VFORK: wait until the usermode helper has execve'd
* successfully We need the data structures to stay around
* until that is done. */
- if (wait)
+ if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
pid = kernel_thread(wait_for_helper, sub_info,
CLONE_FS | CLONE_FILES | SIGCHLD);
else
pid = kernel_thread(____call_usermodehelper, sub_info,
CLONE_VFORK | SIGCHLD);
- if (wait < 0)
- return;
+ switch (wait) {
+ case UMH_NO_WAIT:
+ break;
- if (pid < 0) {
+ case UMH_WAIT_PROC:
+ if (pid > 0)
+ break;
sub_info->retval = pid;
+ /* FALLTHROUGH */
+
+ case UMH_WAIT_EXEC:
complete(sub_info->complete);
- } else if (!wait)
- complete(sub_info->complete);
+ }
+}
+
+/**
+ * call_usermodehelper_setup - prepare to call a usermode helper
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
+ *
+ * Returns either NULL on allocation failure, or a subprocess_info
+ * structure. This should be passed to call_usermodehelper_exec to
+ * exec the process and free the structure.
+ */
+struct subprocess_info *call_usermodehelper_setup(char *path,
+ char **argv, char **envp)
+{
+ struct subprocess_info *sub_info;
+ sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
+ if (!sub_info)
+ goto out;
+
+ INIT_WORK(&sub_info->work, __call_usermodehelper);
+ sub_info->path = path;
+ sub_info->argv = argv;
+ sub_info->envp = envp;
+
+ out:
+ return sub_info;
}
+EXPORT_SYMBOL(call_usermodehelper_setup);
/**
- * call_usermodehelper_keys - start a usermode application
- * @path: pathname for the application
- * @argv: null-terminated argument list
- * @envp: null-terminated environment list
- * @session_keyring: session keyring for process (NULL for an empty keyring)
+ * call_usermodehelper_setkeys - set the session keys for usermode helper
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @session_keyring: the session keyring for the process
+ */
+void call_usermodehelper_setkeys(struct subprocess_info *info,
+ struct key *session_keyring)
+{
+ info->ring = session_keyring;
+}
+EXPORT_SYMBOL(call_usermodehelper_setkeys);
+
+/**
+ * call_usermodehelper_setcleanup - set a cleanup function
+ * @info: a subprocess_info returned by call_usermodehelper_setup
+ * @cleanup: a cleanup function
+ *
+ * The cleanup function is called just before the subprocess_info is
+ * about to be freed. This can be used for freeing the argv and envp.
+ * The function must be runnable in either a process context or the
+ * context in which call_usermodehelper_exec is called.
+ */
+void call_usermodehelper_setcleanup(struct subprocess_info *info,
+ void (*cleanup)(char **argv, char **envp))
+{
+ info->cleanup = cleanup;
+}
+EXPORT_SYMBOL(call_usermodehelper_setcleanup);
+
+/**
+ * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
+ * @sub_info: a subprocess_info returned by call_usermodehelper_setup
+ * @filp: set to the write-end of a pipe
+ *
+ * This constructs a pipe, sets its read end to be the stdin of the
+ * subprocess, and returns the write end in *@filp.
+ */
+int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+ struct file **filp)
+{
+ struct file *f;
+
+ f = create_write_pipe();
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ *filp = f;
+
+ f = create_read_pipe(f);
+ if (IS_ERR(f)) {
+ free_write_pipe(*filp);
+ return PTR_ERR(f);
+ }
+ sub_info->stdin = f;
+
+ return 0;
+}
+EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
+
+/**
+ * call_usermodehelper_exec - start a usermode application
+ * @sub_info: information about the subprocess
* @wait: wait for the application to finish and return status.
* when -1 don't wait at all, but you get no useful error back when
* the program couldn't be exec'ed. This makes it safe to call
@@ -265,81 +362,68 @@ static void __call_usermodehelper(struct work_struct *work)
* Runs a user-space application. The application is started
* asynchronously if wait is not set, and runs as a child of keventd.
* (ie. it runs with full root capabilities).
- *
- * Must be called from process context. Returns a negative error code
- * if program was not execed successfully, or 0.
*/
-int call_usermodehelper_keys(char *path, char **argv, char **envp,
- struct key *session_keyring, int wait)
+int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ enum umh_wait wait)
{
DECLARE_COMPLETION_ONSTACK(done);
- struct subprocess_info *sub_info;
int retval;
- if (!khelper_wq)
- return -EBUSY;
-
- if (path[0] == '\0')
- return 0;
+ if (sub_info->path[0] == '\0') {
+ retval = 0;
+ goto out;
+ }
- sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC);
- if (!sub_info)
- return -ENOMEM;
+ if (!khelper_wq) {
+ retval = -EBUSY;
+ goto out;
+ }
- INIT_WORK(&sub_info->work, __call_usermodehelper);
sub_info->complete = &done;
- sub_info->path = path;
- sub_info->argv = argv;
- sub_info->envp = envp;
- sub_info->ring = session_keyring;
sub_info->wait = wait;
queue_work(khelper_wq, &sub_info->work);
- if (wait < 0) /* task has freed sub_info */
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
return 0;
wait_for_completion(&done);
retval = sub_info->retval;
- kfree(sub_info);
+
+ out:
+ call_usermodehelper_freeinfo(sub_info);
return retval;
}
-EXPORT_SYMBOL(call_usermodehelper_keys);
+EXPORT_SYMBOL(call_usermodehelper_exec);
+/**
+ * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
+ * @path: path to usermode executable
+ * @argv: arg vector for process
+ * @envp: environment for process
+ * @filp: set to the write-end of a pipe
+ *
+ * This is a simple wrapper which executes a usermode-helper function
+ * with a pipe as stdin. It is implemented entirely in terms of
+ * lower-level call_usermodehelper_* functions.
+ */
int call_usermodehelper_pipe(char *path, char **argv, char **envp,
struct file **filp)
{
- DECLARE_COMPLETION(done);
- struct subprocess_info sub_info = {
- .work = __WORK_INITIALIZER(sub_info.work,
- __call_usermodehelper),
- .complete = &done,
- .path = path,
- .argv = argv,
- .envp = envp,
- .retval = 0,
- };
- struct file *f;
+ struct subprocess_info *sub_info;
+ int ret;
- if (!khelper_wq)
- return -EBUSY;
+ sub_info = call_usermodehelper_setup(path, argv, envp);
+ if (sub_info == NULL)
+ return -ENOMEM;
- if (path[0] == '\0')
- return 0;
+ ret = call_usermodehelper_stdinpipe(sub_info, filp);
+ if (ret < 0)
+ goto out;
- f = create_write_pipe();
- if (IS_ERR(f))
- return PTR_ERR(f);
- *filp = f;
-
- f = create_read_pipe(f);
- if (IS_ERR(f)) {
- free_write_pipe(*filp);
- return PTR_ERR(f);
- }
- sub_info.stdin = f;
+ return call_usermodehelper_exec(sub_info, 1);
- queue_work(khelper_wq, &sub_info.work);
- wait_for_completion(&done);
- return sub_info.retval;
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ return ret;
}
EXPORT_SYMBOL(call_usermodehelper_pipe);
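To summarise the refactoring in this hunk: the one-shot helper is now composed from setup + exec, with optional setkeys/setcleanup/stdinpipe steps in between. A hedged sketch of the common case:

static int example_run_helper(char *path, char **argv, char **envp)
{
	struct subprocess_info *info;

	info = call_usermodehelper_setup(path, argv, envp);
	if (info == NULL)
		return -ENOMEM;

	/* call_usermodehelper_exec() frees 'info' on both success and failure */
	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
}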
diff --git a/kernel/sys.c b/kernel/sys.c
index 4d141ae3e80..18987c7f6ad 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2286,3 +2286,61 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
}
return err ? -EFAULT : 0;
}
+
+char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
+
+static void argv_cleanup(char **argv, char **envp)
+{
+ argv_free(argv);
+}
+
+/**
+ * orderly_poweroff - Trigger an orderly system poweroff
+ * @force: force poweroff if command execution fails
+ *
+ * This may be called from any context to trigger a system shutdown.
+ * If the orderly shutdown fails and @force is set, an immediate
+ * shutdown is forced instead.
+ */
+int orderly_poweroff(bool force)
+{
+ int argc;
+ char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
+ static char *envp[] = {
+ "HOME=/",
+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
+ NULL
+ };
+ int ret = -ENOMEM;
+ struct subprocess_info *info;
+
+ if (argv == NULL) {
+ printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
+ __func__, poweroff_cmd);
+ goto out;
+ }
+
+ info = call_usermodehelper_setup(argv[0], argv, envp);
+ if (info == NULL) {
+ argv_free(argv);
+ goto out;
+ }
+
+ call_usermodehelper_setcleanup(info, argv_cleanup);
+
+ ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
+
+ out:
+ if (ret && force) {
+ printk(KERN_WARNING "Failed to start orderly shutdown: "
+ "forcing the issue\n");
+
+ /* I guess this should try to kick off some daemon to
+ sync and poweroff asap. Or not even bother syncing
+ if we're doing an emergency shutdown? */
+ emergency_sync();
+ kernel_power_off();
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(orderly_poweroff);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7063ebc6db0..44a1d699aad 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -46,6 +46,7 @@
#include <linux/syscalls.h>
#include <linux/nfs_fs.h>
#include <linux/acpi.h>
+#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -705,6 +706,15 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "poweroff_cmd",
+ .data = &poweroff_cmd,
+ .maxlen = POWEROFF_CMD_PATH_LEN,
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ .strategy = &sysctl_string,
+ },
{ .ctl_name = 0 }
};
diff --git a/lib/Makefile b/lib/Makefile
index da68b2ca060..61496638740 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,7 +5,7 @@
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o bitmap.o extable.o prio_tree.o \
- sha1.o irq_regs.o reciprocal_div.o
+ sha1.o irq_regs.o reciprocal_div.o argv_split.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
new file mode 100644
index 00000000000..4096ed42f49
--- /dev/null
+++ b/lib/argv_split.c
@@ -0,0 +1,105 @@
+/*
+ * Helper function for splitting a string into an argv-like array.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/bug.h>
+
+static const char *skip_sep(const char *cp)
+{
+ while (*cp && isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static const char *skip_arg(const char *cp)
+{
+ while (*cp && !isspace(*cp))
+ cp++;
+
+ return cp;
+}
+
+static int count_argc(const char *str)
+{
+ int count = 0;
+
+ while (*str) {
+ str = skip_sep(str);
+ if (*str) {
+ count++;
+ str = skip_arg(str);
+ }
+ }
+
+ return count;
+}
+
+/**
+ * argv_free - free an argv
+ * @argv: the argument vector to be freed
+ *
+ * Frees an argv and the strings it points to.
+ */
+void argv_free(char **argv)
+{
+ char **p;
+ for (p = argv; *p; p++)
+ kfree(*p);
+
+ kfree(argv);
+}
+EXPORT_SYMBOL(argv_free);
+
+/**
+ * argv_split - split a string at whitespace, returning an argv
+ * @gfp: the GFP mask used to allocate memory
+ * @str: the string to be split
+ * @argcp: returned argument count
+ *
+ * Returns an array of pointers to strings which are split out from
+ * @str. This is performed by strictly splitting on white-space; no
+ * quote processing is performed. Multiple whitespace characters are
+ * considered to be a single argument separator. The returned array
+ * is always NULL-terminated. Returns NULL on memory allocation
+ * failure.
+ */
+char **argv_split(gfp_t gfp, const char *str, int *argcp)
+{
+ int argc = count_argc(str);
+ char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
+ char **argvp;
+
+ if (argv == NULL)
+ goto out;
+
+ *argcp = argc;
+ argvp = argv;
+
+ while (*str) {
+ str = skip_sep(str);
+
+ if (*str) {
+ const char *p = str;
+ char *t;
+
+ str = skip_arg(str);
+
+ t = kstrndup(p, str-p, gfp);
+ if (t == NULL)
+ goto fail;
+ *argvp++ = t;
+ }
+ }
+ *argvp = NULL;
+
+ out:
+ return argv;
+
+ fail:
+ argv_free(argv);
+ return NULL;
+}
+EXPORT_SYMBOL(argv_split);
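A short, hedged usage sketch (orderly_poweroff() in kernel/sys.c above is the in-tree consumer):

static int example_split(const char *cmdline)
{
	int argc;
	char **argv = argv_split(GFP_KERNEL, cmdline, &argc);

	if (!argv)
		return -ENOMEM;

	printk(KERN_DEBUG "split \"%s\" into %d words\n", cmdline, argc);

	argv_free(argv);
	return 0;
}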
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 12e311dc664..bd5ecbbafab 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -208,7 +208,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
argv [0] = uevent_helper;
argv [1] = (char *)subsystem;
argv [2] = NULL;
- call_usermodehelper (argv[0], argv, envp, 0);
+ call_usermodehelper (argv[0], argv, envp, UMH_WAIT_EXEC);
}
exit:
diff --git a/mm/util.c b/mm/util.c
index 78f3783bdcc..bf340d80686 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -6,7 +6,6 @@
/**
* kstrdup - allocate space for and copy an existing string
- *
* @s: the string to duplicate
* @gfp: the GFP mask used in the kmalloc() call when allocating memory
*/
@@ -27,6 +26,30 @@ char *kstrdup(const char *s, gfp_t gfp)
EXPORT_SYMBOL(kstrdup);
/**
+ * kstrndup - allocate space for and copy an existing string
+ * @s: the string to duplicate
+ * @max: read at most @max chars from @s
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kstrndup(const char *s, size_t max, gfp_t gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ len = strnlen(s, max);
+ buf = kmalloc_track_caller(len+1, gfp);
+ if (buf) {
+ memcpy(buf, s, len);
+ buf[len] = '\0';
+ }
+ return buf;
+}
+EXPORT_SYMBOL(kstrndup);
+
+/**
* kmemdup - duplicate region of memory
*
* @src: memory region to duplicate
@@ -80,7 +103,6 @@ EXPORT_SYMBOL(krealloc);
/*
* strndup_user - duplicate an existing string from user space
- *
* @s: The string to duplicate
* @n: Maximum number of bytes to copy, including the trailing NUL.
*/
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8e05a11155c..3130c343088 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -767,3 +767,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+ /* apply_to_page_range() does all the hard work. */
+ return 0;
+}
+
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ * @returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. Because the top level of the kernel pagetables may be
+ * replicated per process, the new pagetables are synced into every
+ * process's pagetables via vmalloc_sync_all().
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+ struct vm_struct *area;
+
+ area = get_vm_area(size, VM_IOREMAP);
+ if (area == NULL)
+ return NULL;
+
+ /*
+ * This ensures that page tables are constructed for this region
+ * of kernel virtual address space and mapped into init_mm.
+ */
+ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+ area->size, f, NULL)) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ /* Make sure the pagetables are constructed in process kernel
+ mappings */
+ vmalloc_sync_all();
+
+ return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+ struct vm_struct *ret;
+ ret = remove_vm_area(area->addr);
+ BUG_ON(ret != area);
+ kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
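A hedged sketch of the intended usage (the xenbus_map_ring_valloc() path added earlier in this series is the real consumer): reserve address space with constructed page tables, let the caller install its own PTEs, then release with free_vm_area().

static void example_area(void)
{
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);

	if (area == NULL)
		return;

	/*
	 * area->addr now has page tables constructed beneath it but no
	 * mappings installed; the caller (e.g. a grant-table mapper)
	 * fills in the PTEs itself before use.
	 */

	free_vm_area(area);
}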
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index faa6aaf6756..c0f6861eefe 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -460,11 +460,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
skb_pull(skb, plen);
skb_set_mac_header(skb, -ETH_HLEN);
skb->pkt_type = PACKET_HOST;
-#ifdef CONFIG_BR2684_FAST_TRANS
- skb->protocol = ((u16 *) skb->data)[-1];
-#else /* some protocols might require this: */
skb->protocol = br_type_trans(skb, net_dev);
-#endif /* CONFIG_BR2684_FAST_TRANS */
#else
skb_pull(skb, plen - ETH_HLEN);
skb->protocol = eth_type_trans(skb, net_dev);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a786e786320..1ea2f86f768 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -125,7 +125,7 @@ static void br_stp_start(struct net_bridge *br)
char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
char *envp[] = { NULL };
- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
if (r == 0) {
br->stp_enabled = BR_USER_STP;
printk(KERN_INFO "%s: userspace STP started\n", br->dev->name);
diff --git a/net/core/dev.c b/net/core/dev.c
index 13a0d9f6da5..6357f54c8ff 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2715,20 +2715,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
return 0;
}
-void __dev_addr_discard(struct dev_addr_list **list)
-{
- struct dev_addr_list *tmp;
-
- while (*list != NULL) {
- tmp = *list;
- *list = tmp->next;
- if (tmp->da_users > tmp->da_gusers)
- printk("__dev_addr_discard: address leakage! "
- "da_users=%d\n", tmp->da_users);
- kfree(tmp);
- }
-}
-
/**
* dev_unicast_delete - Release secondary unicast address.
* @dev: device
@@ -2777,11 +2763,30 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
}
EXPORT_SYMBOL(dev_unicast_add);
-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+ struct dev_addr_list *tmp;
+
+ while (*list != NULL) {
+ tmp = *list;
+ *list = tmp->next;
+ if (tmp->da_users > tmp->da_gusers)
+ printk("__dev_addr_discard: address leakage! "
+ "da_users=%d\n", tmp->da_users);
+ kfree(tmp);
+ }
+}
+
+static void dev_addr_discard(struct net_device *dev)
{
netif_tx_lock_bh(dev);
+
__dev_addr_discard(&dev->uc_list);
dev->uc_count = 0;
+
+ __dev_addr_discard(&dev->mc_list);
+ dev->mc_count = 0;
+
netif_tx_unlock_bh(dev);
}
@@ -3739,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev)
/*
* Flush the unicast and multicast chains
*/
- dev_unicast_discard(dev);
- dev_mc_discard(dev);
+ dev_addr_discard(dev);
if (dev->uninit)
dev->uninit(dev);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 235a2a8a0d0..99aece1aecc 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -177,18 +177,6 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
}
EXPORT_SYMBOL(dev_mc_unsync);
-/*
- * Discard multicast list when a device is downed
- */
-
-void dev_mc_discard(struct net_device *dev)
-{
- netif_tx_lock_bh(dev);
- __dev_addr_discard(&dev->mc_list);
- dev->mc_count = 0;
- netif_tx_unlock_bh(dev);
-}
-
#ifdef CONFIG_PROC_FS
static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
{
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cc84d8d8a3c..590a767b029 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -79,27 +79,27 @@
struct gen_estimator
{
- struct gen_estimator *next;
+ struct list_head list;
struct gnet_stats_basic *bstats;
struct gnet_stats_rate_est *rate_est;
spinlock_t *stats_lock;
- unsigned interval;
int ewma_log;
u64 last_bytes;
u32 last_packets;
u32 avpps;
u32 avbps;
+ struct rcu_head e_rcu;
};
struct gen_estimator_head
{
struct timer_list timer;
- struct gen_estimator *list;
+ struct list_head list;
};
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
-/* Estimator array lock */
+/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);
static void est_timer(unsigned long arg)
@@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
int idx = (int)arg;
struct gen_estimator *e;
- read_lock(&est_lock);
- for (e = elist[idx].list; e; e = e->next) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &elist[idx].list, list) {
u64 nbytes;
u32 npackets;
u32 rate;
spin_lock(e->stats_lock);
+ read_lock(&est_lock);
+ if (e->bstats == NULL)
+ goto skip;
+
nbytes = e->bstats->bytes;
npackets = e->bstats->packets;
rate = (nbytes - e->last_bytes)<<(7 - idx);
@@ -125,12 +129,14 @@ static void est_timer(unsigned long arg)
e->last_packets = npackets;
e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
e->rate_est->pps = (e->avpps+0x1FF)>>10;
+skip:
+ read_unlock(&est_lock);
spin_unlock(e->stats_lock);
}
- if (elist[idx].list != NULL)
+ if (!list_empty(&elist[idx].list))
mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
- read_unlock(&est_lock);
+ rcu_read_unlock();
}
/**
@@ -147,12 +153,17 @@ static void est_timer(unsigned long arg)
* &rate_est with the statistics lock grabed during this period.
*
* Returns 0 on success or a negative error code.
+ *
+ * NOTE: Called under rtnl_mutex
*/
int gen_new_estimator(struct gnet_stats_basic *bstats,
- struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
+ struct gnet_stats_rate_est *rate_est,
+ spinlock_t *stats_lock,
+ struct rtattr *opt)
{
struct gen_estimator *est;
struct gnet_estimator *parm = RTA_DATA(opt);
+ int idx;
if (RTA_PAYLOAD(opt) < sizeof(*parm))
return -EINVAL;
@@ -164,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
if (est == NULL)
return -ENOBUFS;
- est->interval = parm->interval + 2;
+ idx = parm->interval + 2;
est->bstats = bstats;
est->rate_est = rate_est;
est->stats_lock = stats_lock;
@@ -174,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
- est->next = elist[est->interval].list;
- if (est->next == NULL) {
- init_timer(&elist[est->interval].timer);
- elist[est->interval].timer.data = est->interval;
- elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
- elist[est->interval].timer.function = est_timer;
- add_timer(&elist[est->interval].timer);
+ if (!elist[idx].timer.function) {
+ INIT_LIST_HEAD(&elist[idx].list);
+ setup_timer(&elist[idx].timer, est_timer, idx);
}
- write_lock_bh(&est_lock);
- elist[est->interval].list = est;
- write_unlock_bh(&est_lock);
+
+ if (list_empty(&elist[idx].list))
+ mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
+
+ list_add_rcu(&est->list, &elist[idx].list);
return 0;
}
+static void __gen_kill_estimator(struct rcu_head *head)
+{
+ struct gen_estimator *e = container_of(head,
+ struct gen_estimator, e_rcu);
+ kfree(e);
+}
+
/**
* gen_kill_estimator - remove a rate estimator
* @bstats: basic statistics
@@ -195,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
*
* Removes the rate estimator specified by &bstats and &rate_est
* and deletes the timer.
+ *
+ * NOTE: Called under rtnl_mutex
*/
void gen_kill_estimator(struct gnet_stats_basic *bstats,
struct gnet_stats_rate_est *rate_est)
{
int idx;
- struct gen_estimator *est, **pest;
+ struct gen_estimator *e, *n;
for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
- int killed = 0;
- pest = &elist[idx].list;
- while ((est=*pest) != NULL) {
- if (est->rate_est != rate_est || est->bstats != bstats) {
- pest = &est->next;
+
+ /* Skip non initialized indexes */
+ if (!elist[idx].timer.function)
+ continue;
+
+ list_for_each_entry_safe(e, n, &elist[idx].list, list) {
+ if (e->rate_est != rate_est || e->bstats != bstats)
continue;
- }
write_lock_bh(&est_lock);
- *pest = est->next;
+ e->bstats = NULL;
write_unlock_bh(&est_lock);
- kfree(est);
- killed++;
+ list_del_rcu(&e->list);
+ call_rcu(&e->e_rcu, __gen_kill_estimator);
}
- if (killed && elist[idx].list == NULL)
- del_timer(&elist[idx].timer);
}
}
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index dd9ef65ad3f..519de091a94 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
}
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
- u32 seq_rtt, u32 in_flight, int data_acked)
+ u32 in_flight, int data_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1260e52ad77..55fca1820c3 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
- int flag)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ebfaac2f9f4..d17da30d82d 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)
}
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
- u32 seq_rtt, u32 in_flight, int data_acked)
+ u32 in_flight, int data_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 43d624e5043..14a073d8b60 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
}
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
+static void hstcp_cong_avoid(struct sock *sk, u32 adk,
u32 in_flight, int data_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4ba4a7ae0a8..632c05a7588 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
}
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
u32 in_flight, int data_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index e5be3511722..b3e55cf5617 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)
* o Give cwnd a new value based on the model proposed
* o remember increments <1
*/
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void hybla_cong_avoid(struct sock *sk, u32 ack,
u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
return;
if (!ca->hybla_en)
- return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+ return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
if (ca->rho == 0)
hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index b2b2256d3b8..cc5de6f69d4 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
/*
* Increase window in response to successful acknowledgment.
*/
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e5884ac8f2..fec8a7a4dba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
tcp_ack_no_tstamp(sk, seq_rtt, flag);
}
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_cong_avoid(struct sock *sk, u32 ack,
u32 in_flight, int good)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
+ icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
}
@@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
/* Advance CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
+ tcp_cong_avoid(sk, ack, prior_in_flight, 0);
tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
- tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
+ tcp_cong_avoid(sk, ack, prior_in_flight, 1);
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index e49836ce012..80e140e3ec2 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
* Will only call newReno CA when away from inference.
 * From TCP-LP's paper, this will be handled in additive increase.
*/
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
- int flag)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
{
struct lp *lp = inet_csk_ca(sk);
if (!(lp->flag & LP_WITHIN_INF))
- tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
+ tcp_reno_cong_avoid(sk, ack, in_flight, flag);
}
/**
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4624501e968..be27a33a1c6 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,7 @@
#define TCP_SCALABLE_AI_CNT 50U
#define TCP_SCALABLE_MD_SCALE 3
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index e218a51cece..914e0307f7a 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
- u32 seq_rtt, u32 in_flight, int flag)
+ u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
struct vegas *vegas = inet_csk_ca(sk);
if (!vegas->doing_vegas_now)
- return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+ return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
/* The key players are v_beg_snd_una and v_beg_snd_nxt.
*
@@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
/* We don't have enough RTT samples to do the Vegas
* calculation, so we'll behave like Reno.
*/
- tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+ tcp_reno_cong_avoid(sk, ack, in_flight, flag);
} else {
u32 rtt, target_cwnd, diff;
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ec854cc5fad..7a55ddf8603 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
}
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
- u32 seq_rtt, u32 in_flight, int flag)
+ u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk);
if (!veno->doing_veno_now)
- return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+ return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
/* limited by applications */
if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
/* We don't have enough rtt samples to do the Veno
* calculation, so we'll behave like Reno.
*/
- tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
+ tcp_reno_cong_avoid(sk, ack, in_flight, flag);
} else {
u32 rtt, target_cwnd;
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 545ed237ab5..c04b7c6ec70 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
}
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
- u32 seq_rtt, u32 in_flight, int flag)
+ u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
struct yeah *yeah = inet_csk_ca(sk);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index dcd7e325b28..4c670cf6aef 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2567,7 +2567,7 @@ int __init irsock_init(void)
* Remove IrDA protocol
*
*/
-void __exit irsock_cleanup(void)
+void irsock_cleanup(void)
{
sock_unregister(PF_IRDA);
proto_unregister(&irda_proto);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 7b5def1ea63..435b563d29a 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -95,14 +95,14 @@ int __init irda_device_init( void)
return 0;
}
-static void __exit leftover_dongle(void *arg)
+static void leftover_dongle(void *arg)
{
struct dongle_reg *reg = arg;
IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
reg->type);
}
-void __exit irda_device_cleanup(void)
+void irda_device_cleanup(void)
{
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 774eb707940..ee3889fa49a 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -153,7 +153,7 @@ int __init iriap_init(void)
* Initializes the IrIAP layer, called by the module cleanup code in
* irmod.c
*/
-void __exit iriap_cleanup(void)
+void iriap_cleanup(void)
{
irlmp_unregister_service(service_handle);
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 4adaae242b9..cf302457097 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -36,39 +36,6 @@ hashbin_t *irias_objects;
*/
struct ias_value irias_missing = { IAS_MISSING, 0, 0, 0, {0}};
-/*
- * Function strndup (str, max)
- *
- * My own kernel version of strndup!
- *
- * Faster, check boundary... Jean II
- */
-static char *strndup(char *str, size_t max)
-{
- char *new_str;
- int len;
-
- /* Check string */
- if (str == NULL)
- return NULL;
- /* Check length, truncate */
- len = strlen(str);
- if(len > max)
- len = max;
-
- /* Allocate new string */
- new_str = kmalloc(len + 1, GFP_ATOMIC);
- if (new_str == NULL) {
- IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
- return NULL;
- }
-
- /* Copy and truncate */
- memcpy(new_str, str, len);
- new_str[len] = '\0';
-
- return new_str;
-}
/*
* Function ias_new_object (name, id)
@@ -90,7 +57,7 @@ struct ias_object *irias_new_object( char *name, int id)
}
obj->magic = IAS_OBJECT_MAGIC;
- obj->name = strndup(name, IAS_MAX_CLASSNAME);
+ obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
if (!obj->name) {
IRDA_WARNING("%s(), Unable to allocate name!\n",
__FUNCTION__);
@@ -360,7 +327,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
}
attrib->magic = IAS_ATTRIB_MAGIC;
- attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+ attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
/* Insert value */
attrib->value = irias_new_integer_value(value);
@@ -404,7 +371,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
}
attrib->magic = IAS_ATTRIB_MAGIC;
- attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+ attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
attrib->value = irias_new_octseq_value( octets, len);
if (!attrib->name || !attrib->value) {
@@ -446,7 +413,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
}
attrib->magic = IAS_ATTRIB_MAGIC;
- attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
+ attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
attrib->value = irias_new_string_value(value);
if (!attrib->name || !attrib->value) {
@@ -506,7 +473,7 @@ struct ias_value *irias_new_string_value(char *string)
value->type = IAS_STRING;
value->charset = CS_ASCII;
- value->t.string = strndup(string, IAS_MAX_STRING);
+ value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
if (!value->t.string) {
IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
kfree(value);
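
The irias_object.c changes above drop the file-local strndup() in favour of the generic kstrndup(), which copies at most max characters of the source with the requested GFP flags, NUL-terminates the result, and returns NULL on allocation failure or a NULL source. A minimal sketch of the same pattern (the wrapper name and length parameter are made up for illustration):

#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: duplicate a caller-supplied attribute name with a hard
 * length cap, in atomic context, the way the converted code now does. */
static char *dup_attrib_name(const char *name, size_t max_len)
{
	char *copy;

	copy = kstrndup(name, max_len, GFP_ATOMIC);
	if (!copy)
		return NULL;	/* caller handles the allocation failure */

	return copy;		/* released later with kfree(copy) */
}
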
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 2fc9f518f89..3d76aafdb2e 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -95,7 +95,7 @@ int __init irlap_init(void)
return 0;
}
-void __exit irlap_cleanup(void)
+void irlap_cleanup(void)
{
IRDA_ASSERT(irlap != NULL, return;);
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 24a5e3f2377..7efa930ed68 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -116,7 +116,7 @@ int __init irlmp_init(void)
* Remove IrLMP layer
*
*/
-void __exit irlmp_cleanup(void)
+void irlmp_cleanup(void)
{
/* Check for main structure */
IRDA_ASSERT(irlmp != NULL, return;);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index d6f9aba5b9d..181cb51b48a 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -84,7 +84,7 @@ void __init irda_proc_register(void)
* Unregister irda entry in /proc file system
*
*/
-void __exit irda_proc_unregister(void)
+void irda_proc_unregister(void)
{
int i;
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2e968e7d8fe..957e04feb0f 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -287,7 +287,7 @@ int __init irda_sysctl_register(void)
* Unregister our sysctl interface
*
*/
-void __exit irda_sysctl_unregister(void)
+void irda_sysctl_unregister(void)
{
unregister_sysctl_table(irda_table_header);
}
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 7f50832a2cd..3d7ab03fb13 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -109,7 +109,7 @@ int __init irttp_init(void)
* Called by module destruction/cleanup code
*
*/
-void __exit irttp_cleanup(void)
+void irttp_cleanup(void)
{
/* Check for main structure */
IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
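
The IrDA hunks above all drop the __exit annotation from the protocol-wide cleanup routines, which keeps them in ordinary .text instead of the discardable exit section. The usual reason for such a change (not stated in the hunks themselves, so the following is an inferred, hypothetical illustration) is that a teardown helper is also reachable from an __init error path, where .exit.text may already have been discarded on built-in configurations:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>

static int example_register(void)	/* hypothetical registration step */
{
	return 0;
}

/* Deliberately NOT __exit: it is reachable from the init error path below,
 * not only from module unload. */
static void example_cleanup(void)
{
	/* undo registrations, free resources, ... */
}

static int __init example_init(void)
{
	if (example_register() < 0) {
		example_cleanup();
		return -ENODEV;
	}
	return 0;
}

static void __exit example_exit(void)
{
	example_cleanup();
}

module_init(example_init);
module_exit(example_exit);
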
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3ac39f1ec77..3599770a247 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -436,6 +436,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
config NETFILTER_XT_MATCH_CONNLIMIT
tristate '"connlimit" match support"'
depends on NETFILTER_XTABLES
+ depends on NF_CONNTRACK
---help---
This match allows you to match against the number of parallel
connections to a server per client IP address (or address block).
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a3c8e692f49..641cfbc278d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1012,13 +1012,14 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int val = 0, err;
+ unsigned int val = 0;
+ int err;
if (level != SOL_NETLINK)
return -ENOPROTOOPT;
if (optlen >= sizeof(int) &&
- get_user(val, (int __user *)optval))
+ get_user(val, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
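
The af_netlink.c hunk above switches the option value to an unsigned int, both in the local variable and in the get_user() cast, presumably so that later checks on multicast group numbers behave as unsigned comparisons rather than treating large values as negative. A small sketch of the pattern (the helper function is hypothetical):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Illustrative only: fetch a socket-option value as an unsigned quantity,
 * mirroring the converted netlink_setsockopt() above. */
static int fetch_sockopt_val(char __user *optval, unsigned int optlen,
			     unsigned int *val)
{
	*val = 0;

	if (optlen >= sizeof(int) &&
	    get_user(*val, (unsigned int __user *)optval))
		return -EFAULT;

	return 0;
}
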
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index d3f7c3f9407..8a74cac0be8 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -97,7 +97,7 @@ config NET_SCH_ATM
select classes of this queuing discipline. Each class maps
the flow(s) it is handling to a given virtual circuit.
- See the top of <file:net/sched/sch_atm.c>) for more details.
+ See the top of <file:net/sched/sch_atm.c> for more details.
To compile this code as a module, choose M here: the
module will be called sch_atm.
@@ -137,7 +137,7 @@ config NET_SCH_SFQ
tristate "Stochastic Fairness Queueing (SFQ)"
---help---
Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
- packet scheduling algorithm .
+ packet scheduling algorithm.
See the top of <file:net/sched/sch_sfq.c> for more details.
@@ -306,7 +306,7 @@ config NET_CLS_RSVP6
is important for real time data such as streaming sound or video.
Say Y here if you want to be able to classify outgoing packets based
- on their RSVP requests and you are using the IPv6.
+ on their RSVP requests and you are using the IPv6 protocol.
To compile this code as a module, choose M here: the
module will be called cls_rsvp6.
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 417ec8fb7f1..ddc4f2c5437 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -292,13 +292,12 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
}
}
DPRINTK("atm_tc_change: new id %x\n", classid);
- flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+ flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
DPRINTK("atm_tc_change: flow %p\n", flow);
if (!flow) {
error = -ENOBUFS;
goto err_out;
}
- memset(flow, 0, sizeof(*flow));
flow->filter_list = NULL;
if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
flow->q = &noop_qdisc;
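
The sch_atm.c hunk above folds kmalloc() plus memset() into kzalloc(). One small behavioural note: the old memset() only cleared sizeof(*flow), whereas kzalloc() zeroes the whole allocation, including the hdr_len trailer. A generic sketch of the conversion (struct and function names are illustrative, not from the patch):

#include <linux/slab.h>

struct example_flow {
	void *filter_list;
	int ref;
	char hdr[0];	/* variable-length trailer, as in atm_flow_data */
};

/* Illustrative only: kzalloc(size, flags) behaves like kmalloc(size, flags)
 * followed by memset(ptr, 0, size). */
static struct example_flow *alloc_example_flow(size_t hdr_len)
{
	return kzalloc(sizeof(struct example_flow) + hdr_len, GFP_KERNEL);
}
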
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 157bfbd250b..b48f06fc9fd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2141,7 +2141,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
if (last == first)
break;
- last = last->u.next;
+ last = (struct xfrm_dst *)last->u.dst.next;
last->child_mtu_cached = mtu;
}
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index f573ac189a0..557500110a1 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -108,7 +108,8 @@ static int call_sbin_request_key(struct key *key,
argv[i] = NULL;
/* do it */
- ret = call_usermodehelper_keys(argv[0], argv, envp, keyring, 1);
+ ret = call_usermodehelper_keys(argv[0], argv, envp, keyring,
+ UMH_WAIT_PROC);
error_link:
key_put(keyring);
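
The request_key.c hunk above replaces the bare wait flag 1 with the symbolic UMH_WAIT_PROC, i.e. block until the usermode helper process has exited. For illustration, here is the same constant with the generic helper API (this sketch uses call_usermodehelper() rather than the keyring-aware variant in the hunk, and the helper path is made up):

#include <linux/kmod.h>

/* Hypothetical example: spawn /sbin/example-helper and wait for the process
 * to complete before returning. */
static int run_example_helper(void)
{
	char *argv[] = { "/sbin/example-helper", NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
			 NULL };

	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
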