Diffstat (limited to 'arch/ia64')
-rw-r--r-- arch/ia64/configs/generic_defconfig | 3
-rw-r--r-- arch/ia64/dig/Makefile | 4
-rw-r--r-- arch/ia64/dig/dig_vtd_iommu.c | 59
-rw-r--r-- arch/ia64/hp/common/hwsw_iommu.c | 165
-rw-r--r-- arch/ia64/hp/common/sba_iommu.c | 79
-rw-r--r-- arch/ia64/hp/sim/simserial.c | 49
-rw-r--r-- arch/ia64/ia32/ia32_entry.S | 2
-rw-r--r-- arch/ia64/include/asm/dma-mapping.h | 194
-rw-r--r-- arch/ia64/include/asm/fpu.h | 2
-rw-r--r-- arch/ia64/include/asm/gcc_intrin.h | 1
-rw-r--r-- arch/ia64/include/asm/intrinsics.h | 7
-rw-r--r-- arch/ia64/include/asm/kvm.h | 52
-rw-r--r-- arch/ia64/include/asm/kvm_host.h | 18
-rw-r--r-- arch/ia64/include/asm/machvec.h | 102
-rw-r--r-- arch/ia64/include/asm/machvec_dig_vtd.h | 20
-rw-r--r-- arch/ia64/include/asm/machvec_hpzx1.h | 23
-rw-r--r-- arch/ia64/include/asm/machvec_hpzx1_swiotlb.h | 27
-rw-r--r-- arch/ia64/include/asm/machvec_sn2.h | 27
-rw-r--r-- arch/ia64/include/asm/mmu_context.h | 6
-rw-r--r-- arch/ia64/include/asm/module.h | 6
-rw-r--r-- arch/ia64/include/asm/msidef.h | 42
-rw-r--r-- arch/ia64/include/asm/native/inst.h | 13
-rw-r--r-- arch/ia64/include/asm/native/patchlist.h | 38
-rw-r--r-- arch/ia64/include/asm/native/pvchk_inst.h | 8
-rw-r--r-- arch/ia64/include/asm/paravirt.h | 65
-rw-r--r-- arch/ia64/include/asm/paravirt_patch.h | 143
-rw-r--r-- arch/ia64/include/asm/paravirt_privop.h | 365
-rw-r--r-- arch/ia64/include/asm/percpu.h | 4
-rw-r--r-- arch/ia64/include/asm/smp.h | 3
-rw-r--r-- arch/ia64/include/asm/socket.h | 3
-rw-r--r-- arch/ia64/include/asm/spinlock.h | 77
-rw-r--r-- arch/ia64/include/asm/swab.h | 2
-rw-r--r-- arch/ia64/include/asm/timex.h | 1
-rw-r--r-- arch/ia64/include/asm/topology.h | 12
-rw-r--r-- arch/ia64/include/asm/uv/uv.h | 13
-rw-r--r-- arch/ia64/include/asm/uv/uv_hub.h | 6
-rw-r--r-- arch/ia64/include/asm/uv/uv_mmrs.h | 158
-rw-r--r-- arch/ia64/include/asm/xen/hypervisor.h | 39
-rw-r--r-- arch/ia64/include/asm/xen/inst.h | 28
-rw-r--r-- arch/ia64/include/asm/xen/interface.h | 9
-rw-r--r-- arch/ia64/include/asm/xen/minstate.h | 11
-rw-r--r-- arch/ia64/include/asm/xen/patchlist.h | 38
-rw-r--r-- arch/ia64/include/asm/xen/privop.h | 8
-rw-r--r-- arch/ia64/kernel/Makefile | 43
-rw-r--r-- arch/ia64/kernel/Makefile.gate | 27
-rw-r--r-- arch/ia64/kernel/acpi.c | 12
-rw-r--r-- arch/ia64/kernel/asm-offsets.c | 2
-rw-r--r-- arch/ia64/kernel/dma-mapping.c | 13
-rw-r--r-- arch/ia64/kernel/efi.c | 1
-rw-r--r-- arch/ia64/kernel/entry.S | 4
-rw-r--r-- arch/ia64/kernel/fsys.S | 35
-rw-r--r-- arch/ia64/kernel/gate.S | 171
-rw-r--r-- arch/ia64/kernel/gate.lds.S | 17
-rw-r--r-- arch/ia64/kernel/head.S | 10
-rw-r--r-- arch/ia64/kernel/iosapic.c | 2
-rw-r--r-- arch/ia64/kernel/irq.c | 6
-rw-r--r-- arch/ia64/kernel/irq_ia64.c | 31
-rw-r--r-- arch/ia64/kernel/ivt.S | 2
-rw-r--r-- arch/ia64/kernel/machvec.c | 8
-rw-r--r-- arch/ia64/kernel/mca.c | 6
-rw-r--r-- arch/ia64/kernel/module.c | 35
-rw-r--r-- arch/ia64/kernel/msi_ia64.c | 59
-rw-r--r-- arch/ia64/kernel/palinfo.c | 2
-rw-r--r-- arch/ia64/kernel/paravirt.c | 539
-rw-r--r-- arch/ia64/kernel/paravirt_patch.c | 514
-rw-r--r-- arch/ia64/kernel/paravirt_patchlist.c | 79
-rw-r--r-- arch/ia64/kernel/paravirt_patchlist.h | 28
-rw-r--r-- arch/ia64/kernel/paravirtentry.S | 99
-rw-r--r-- arch/ia64/kernel/patch.c | 40
-rw-r--r-- arch/ia64/kernel/pci-dma.c | 46
-rw-r--r-- arch/ia64/kernel/pci-swiotlb.c | 30
-rw-r--r-- arch/ia64/kernel/perfmon.c | 6
-rw-r--r-- arch/ia64/kernel/process.c | 2
-rw-r--r-- arch/ia64/kernel/salinfo.c | 6
-rw-r--r-- arch/ia64/kernel/setup.c | 9
-rw-r--r-- arch/ia64/kernel/smp.c | 6
-rw-r--r-- arch/ia64/kernel/smpboot.c | 17
-rw-r--r-- arch/ia64/kernel/time.c | 25
-rw-r--r-- arch/ia64/kernel/vmlinux.lds.S | 41
-rw-r--r-- arch/ia64/kvm/Kconfig | 4
-rw-r--r-- arch/ia64/kvm/irq.h | 2
-rw-r--r-- arch/ia64/kvm/kvm-ia64.c | 127
-rw-r--r-- arch/ia64/kvm/kvm_fw.c | 151
-rw-r--r-- arch/ia64/kvm/process.c | 71
-rw-r--r-- arch/ia64/kvm/vcpu.c | 46
-rw-r--r-- arch/ia64/kvm/vcpu.h | 4
-rw-r--r-- arch/ia64/kvm/vtlb.c | 46
-rw-r--r-- arch/ia64/mm/init.c | 12
-rw-r--r-- arch/ia64/mm/tlb.c | 2
-rw-r--r-- arch/ia64/scripts/pvcheck.sed | 1
-rw-r--r-- arch/ia64/sn/kernel/io_common.c | 15
-rw-r--r-- arch/ia64/sn/kernel/io_init.c | 12
-rw-r--r-- arch/ia64/sn/kernel/msi_sn.c | 2
-rw-r--r-- arch/ia64/sn/kernel/setup.c | 5
-rw-r--r-- arch/ia64/sn/kernel/sn2/prominfo_proc.c | 9
-rw-r--r-- arch/ia64/sn/kernel/sn2/sn2_smp.c | 12
-rw-r--r-- arch/ia64/sn/kernel/sn2/sn_hwperf.c | 8
-rw-r--r-- arch/ia64/sn/pci/pci_dma.c | 99
-rw-r--r-- arch/ia64/sn/pci/pcibr/pcibr_dma.c | 4
-rw-r--r-- arch/ia64/xen/Makefile | 19
-rw-r--r-- arch/ia64/xen/gate-data.S | 3
-rw-r--r-- arch/ia64/xen/hypercall.S | 2
-rw-r--r-- arch/ia64/xen/time.c | 48
-rw-r--r-- arch/ia64/xen/xen_pv_ops.c | 800
104 files changed, 4281 insertions, 1128 deletions
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index a109db30ce5..75645495c2d 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -193,7 +193,6 @@ CONFIG_BOUNCE=y
CONFIG_NR_QUICK=1
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
-CONFIG_MMU_NOTIFIER=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_FLATMEM_ENABLE=y
@@ -416,8 +415,6 @@ CONFIG_SGI_IOC4=y
# CONFIG_ENCLOSURE_SERVICES is not set
CONFIG_SGI_XP=m
# CONFIG_HP_ILO is not set
-CONFIG_SGI_GRU=m
-# CONFIG_SGI_GRU_DEBUG is not set
# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 5c0283830bd..2f7caddf093 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -7,8 +7,8 @@
obj-y := setup.o
ifeq ($(CONFIG_DMAR), y)
-obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
+obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o
else
obj-$(CONFIG_IA64_GENERIC) += machvec.o
endif
-obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
+
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
deleted file mode 100644
index 1c8a079017a..00000000000
--- a/arch/ia64/dig/dig_vtd_iommu.c
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/intel-iommu.h>
-
-void *
-vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags)
-{
- return intel_alloc_coherent(dev, size, dma_handle, flags);
-}
-EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
-
-void
-vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- intel_free_coherent(dev, size, vaddr, dma_handle);
-}
-EXPORT_SYMBOL_GPL(vtd_free_coherent);
-
-dma_addr_t
-vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
- int dir, struct dma_attrs *attrs)
-{
- return intel_map_single(dev, (phys_addr_t)addr, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
-
-void
-vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- int dir, struct dma_attrs *attrs)
-{
- intel_unmap_single(dev, iova, size, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
-
-int
-vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
- int dir, struct dma_attrs *attrs)
-{
- return intel_map_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
-
-void
-vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
- int nents, int dir, struct dma_attrs *attrs)
-{
- intel_unmap_sg(dev, sglist, nents, dir);
-}
-EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
-
-int
-vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 2769dbfd03b..e4a80d82e3d 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -13,49 +13,34 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-
#include <asm/machvec.h>
+extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
-/* hwiommu declarations & definitions: */
-
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
-
-#define hwiommu_alloc_coherent sba_alloc_coherent
-#define hwiommu_free_coherent sba_free_coherent
-#define hwiommu_map_single_attrs sba_map_single_attrs
-#define hwiommu_unmap_single_attrs sba_unmap_single_attrs
-#define hwiommu_map_sg_attrs sba_map_sg_attrs
-#define hwiommu_unmap_sg_attrs sba_unmap_sg_attrs
-#define hwiommu_dma_supported sba_dma_supported
-#define hwiommu_dma_mapping_error sba_dma_mapping_error
-#define hwiommu_sync_single_for_cpu machvec_dma_sync_single
-#define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg
-#define hwiommu_sync_single_for_device machvec_dma_sync_single
-#define hwiommu_sync_sg_for_device machvec_dma_sync_sg
-
-
/*
* Note: we need to make the determination of whether or not to use
* the sw I/O TLB based purely on the device structure. Anything else
* would be unreliable or would be too intrusive.
*/
-static inline int
-use_swiotlb (struct device *dev)
+static inline int use_swiotlb(struct device *dev)
{
- return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask);
+ return dev && dev->dma_mask &&
+ !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
}
+struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+{
+ if (use_swiotlb(dev))
+ return &swiotlb_dma_ops;
+ return &sba_dma_ops;
+}
+EXPORT_SYMBOL(hwsw_dma_get_ops);
+
void __init
hwsw_init (void)
{
@@ -71,125 +56,3 @@ hwsw_init (void)
#endif
}
}
-
-void *
-hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
-{
- if (use_swiotlb(dev))
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
- else
- return hwiommu_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-void
-hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
- if (use_swiotlb(dev))
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
- else
- hwiommu_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-dma_addr_t
-hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
- struct dma_attrs *attrs)
-{
- if (use_swiotlb(dev))
- return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
- else
- return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_single_attrs);
-
-void
-hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- int dir, struct dma_attrs *attrs)
-{
- if (use_swiotlb(dev))
- return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
- else
- return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_single_attrs);
-
-int
-hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
- int dir, struct dma_attrs *attrs)
-{
- if (use_swiotlb(dev))
- return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
- else
- return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_map_sg_attrs);
-
-void
-hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
- int dir, struct dma_attrs *attrs)
-{
- if (use_swiotlb(dev))
- return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
- else
- return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
-}
-EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
-
-void
-hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
- if (use_swiotlb(dev))
- swiotlb_sync_single_for_cpu(dev, addr, size, dir);
- else
- hwiommu_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
- if (use_swiotlb(dev))
- swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir);
- else
- hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-void
-hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir)
-{
- if (use_swiotlb(dev))
- swiotlb_sync_single_for_device(dev, addr, size, dir);
- else
- hwiommu_sync_single_for_device(dev, addr, size, dir);
-}
-
-void
-hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir)
-{
- if (use_swiotlb(dev))
- swiotlb_sync_sg_for_device(dev, sg, nelems, dir);
- else
- hwiommu_sync_sg_for_device(dev, sg, nelems, dir);
-}
-
-int
-hwsw_dma_supported (struct device *dev, u64 mask)
-{
- if (hwiommu_dma_supported(dev, mask))
- return 1;
- return swiotlb_dma_supported(dev, mask);
-}
-
-int
-hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return hwiommu_dma_mapping_error(dev, dma_addr) ||
- swiotlb_dma_mapping_error(dev, dma_addr);
-}
-
-EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_dma_supported);
-EXPORT_SYMBOL(hwsw_alloc_coherent);
-EXPORT_SYMBOL(hwsw_free_coherent);
-EXPORT_SYMBOL(hwsw_sync_single_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_single_for_device);
-EXPORT_SYMBOL(hwsw_sync_sg_for_cpu);
-EXPORT_SYMBOL(hwsw_sync_sg_for_device);
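
The conversion above collapses the exported hwsw_* wrappers into one per-device ops lookup: the swiotlb-vs-sba decision is made once, when the table is fetched, instead of in every wrapper. A minimal caller-side sketch of the new pattern (demo_map() and its arguments are hypothetical; the map_page signature is the dma_map_ops hook shown in the sba_iommu.c hunks below):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Hypothetical caller: fetch the ops table once, then indirect
     * through it. hwsw_dma_get_ops() returns either &sba_dma_ops or
     * &swiotlb_dma_ops, as in the hunk above. */
    static dma_addr_t demo_map(struct device *dev, void *buf, size_t len)
    {
            struct dma_map_ops *ops = hwsw_dma_get_ops(dev);

            return ops->map_page(dev, virt_to_page(buf),
                                 (unsigned long)buf & ~PAGE_MASK, len,
                                 DMA_TO_DEVICE, NULL);
    }
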
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 6d5e6c5630e..56ceb68eb99 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -36,6 +36,7 @@
#include <linux/bitops.h> /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
#include <asm/delay.h> /* ia64_get_itc() */
#include <asm/io.h>
@@ -908,11 +909,13 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
-dma_addr_t
-sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
- struct dma_attrs *attrs)
+static dma_addr_t sba_map_page(struct device *dev, struct page *page,
+ unsigned long poff, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct ioc *ioc;
+ void *addr = page_address(page) + poff;
dma_addr_t iovp;
dma_addr_t offset;
u64 *pdir_start;
@@ -990,7 +993,14 @@ sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
#endif
return SBA_IOVA(ioc, iovp, offset);
}
-EXPORT_SYMBOL(sba_map_single_attrs);
+
+static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return sba_map_page(dev, virt_to_page(addr),
+ (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
+}
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
@@ -1026,8 +1036,8 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- int dir, struct dma_attrs *attrs)
+static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
@@ -1094,7 +1104,12 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
-EXPORT_SYMBOL(sba_unmap_single_attrs);
+
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ sba_unmap_page(dev, iova, size, dir, attrs);
+}
/**
* sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1104,7 +1119,7 @@ EXPORT_SYMBOL(sba_unmap_single_attrs);
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
-void *
+static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
struct ioc *ioc;
@@ -1167,7 +1182,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
-void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
{
sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
free_pages((unsigned long) vaddr, get_order(size));
@@ -1422,8 +1438,9 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
-int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
- int dir, struct dma_attrs *attrs)
+static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -1502,7 +1519,6 @@ int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
return filled;
}
-EXPORT_SYMBOL(sba_map_sg_attrs);
/**
* sba_unmap_sg_attrs - unmap Scatter/Gather list
@@ -1514,8 +1530,9 @@ EXPORT_SYMBOL(sba_map_sg_attrs);
*
* See Documentation/PCI/PCI-DMA-mapping.txt
*/
-void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
- int nents, int dir, struct dma_attrs *attrs)
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
struct ioc *ioc;
@@ -1551,7 +1568,6 @@ void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
#endif
}
-EXPORT_SYMBOL(sba_unmap_sg_attrs);
/**************************************************************
*
@@ -2064,6 +2080,8 @@ static struct acpi_driver acpi_sba_ioc_driver = {
},
};
+extern struct dma_map_ops swiotlb_dma_ops;
+
static int __init
sba_init(void)
{
@@ -2077,6 +2095,7 @@ sba_init(void)
* a successful kdump kernel boot is to use the swiotlb.
*/
if (is_kdump_kernel()) {
+ dma_ops = &swiotlb_dma_ops;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to initialize software I/O TLB:"
" Try machvec=dig boot option");
@@ -2092,6 +2111,7 @@ sba_init(void)
* If we didn't find something sba_iommu can claim, we
* need to setup the swiotlb and switch to the dig machvec.
*/
+ dma_ops = &swiotlb_dma_ops;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to find SBA IOMMU or initialize "
"software I/O TLB: Try machvec=dig boot option");
@@ -2138,15 +2158,13 @@ nosbagart(char *str)
return 1;
}
-int
-sba_dma_supported (struct device *dev, u64 mask)
+static int sba_dma_supported (struct device *dev, u64 mask)
{
/* make sure it's at least 32bit capable */
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
-int
-sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
@@ -2176,7 +2194,22 @@ sba_page_override(char *str)
__setup("sbapagesize=",sba_page_override);
-EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_coherent);
-EXPORT_SYMBOL(sba_free_coherent);
+struct dma_map_ops sba_dma_ops = {
+ .alloc_coherent = sba_alloc_coherent,
+ .free_coherent = sba_free_coherent,
+ .map_page = sba_map_page,
+ .unmap_page = sba_unmap_page,
+ .map_sg = sba_map_sg_attrs,
+ .unmap_sg = sba_unmap_sg_attrs,
+ .sync_single_for_cpu = machvec_dma_sync_single,
+ .sync_sg_for_cpu = machvec_dma_sync_sg,
+ .sync_single_for_device = machvec_dma_sync_single,
+ .sync_sg_for_device = machvec_dma_sync_sg,
+ .dma_supported = sba_dma_supported,
+ .mapping_error = sba_dma_mapping_error,
+};
+
+void sba_dma_init(void)
+{
+ dma_ops = &sba_dma_ops;
+}
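
The key move in this file is re-expressing the old virt-based entry point in terms of the new page-based hook. An illustrative one-step sketch of the address decomposition it relies on (sba_map_page() is the static function defined in the hunks above, so this only compiles within that file):

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Any kernel virtual address splits into a page plus an in-page
     * offset, so the page-based hook serves the old entry point
     * unchanged. */
    static dma_addr_t demo_map_single(struct device *dev, void *addr,
                                      size_t size, enum dma_data_direction dir)
    {
            struct page *pg   = virt_to_page(addr);
            unsigned long off = (unsigned long)addr & ~PAGE_MASK;

            return sba_map_page(dev, pg, off, size, dir, NULL);
    }

The same decomposition reappears in dma_map_single_attrs() in the dma-mapping.h diff below.
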
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 24b1ad5334c..2bef5261d96 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -24,6 +24,7 @@
#include <linux/major.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/console.h>
@@ -848,38 +849,36 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
* /proc fs routines....
*/
-static inline int line_info(char *buf, struct serial_state *state)
+static inline void line_info(struct seq_file *m, struct serial_state *state)
{
- return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n",
+ seq_printf(m, "%d: uart:%s port:%lX irq:%d\n",
state->line, uart_config[state->type].name,
state->port, state->irq);
}
-static int rs_read_proc(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+static int rs_proc_show(struct seq_file *m, void *v)
{
- int i, len = 0, l;
- off_t begin = 0;
-
- len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version);
- for (i = 0; i < NR_PORTS && len < 4000; i++) {
- l = line_info(page + len, &rs_table[i]);
- len += l;
- if (len+begin > off+count)
- goto done;
- if (len+begin < off) {
- begin += len;
- len = 0;
- }
- }
- *eof = 1;
-done:
- if (off >= len+begin)
- return 0;
- *start = page + (begin-off);
- return ((count < begin+len-off) ? count : begin+len-off);
+ int i;
+
+ seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version);
+ for (i = 0; i < NR_PORTS; i++)
+ line_info(m, &rs_table[i]);
+ return 0;
}
+static int rs_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rs_proc_show, NULL);
+}
+
+static const struct file_operations rs_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rs_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* ---------------------------------------------------------------------
* rs_init() and friends
@@ -917,7 +916,7 @@ static const struct tty_operations hp_ops = {
.start = rs_start,
.hangup = rs_hangup,
.wait_until_sent = rs_wait_until_sent,
- .read_proc = rs_read_proc,
+ .proc_fops = &rs_proc_fops,
};
/*
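
For reference, the deleted rs_read_proc() implemented buffering, offset handling and EOF detection by hand; single_open() provides all of that. A self-contained sketch of the pattern adopted above (the "demo" names are hypothetical):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    /* The show callback just prints; seq_file owns the bookkeeping. */
    static int demo_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "hello from seq_file\n");
            return 0;
    }

    static int demo_open(struct inode *inode, struct file *file)
    {
            return single_open(file, demo_show, NULL);
    }

    static const struct file_operations demo_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init demo_init(void)
    {
            if (!proc_create("demo", 0, NULL, &demo_fops))  /* /proc/demo */
                    return -ENOMEM;
            return 0;
    }
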
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index a46f8395e9a..af9405cd70e 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -240,7 +240,7 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_umask /* 60 */
data8 sys_chroot
- data8 sys_ustat
+ data8 compat_sys_ustat
data8 sys_dup2
data8 sys_getppid
data8 sys_getpgrp /* 65 */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 1f912d92758..36c0009dbec 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -11,99 +11,128 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-struct dma_mapping_ops {
- int (*mapping_error)(struct device *dev,
- dma_addr_t dma_addr);
- void* (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
- void (*free_coherent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
- size_t size, int direction);
- void (*unmap_single)(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
- void (*sync_single_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
- void (*sync_single_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, size_t size,
- int direction);
- void (*sync_single_range_for_cpu)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
- void (*sync_single_range_for_device)(struct device *hwdev,
- dma_addr_t dma_handle, unsigned long offset,
- size_t size, int direction);
- void (*sync_sg_for_cpu)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
- void (*sync_sg_for_device)(struct device *hwdev,
- struct scatterlist *sg, int nelems,
- int direction);
- int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
- void (*unmap_sg)(struct device *hwdev,
- struct scatterlist *sg, int nents,
- int direction);
- int (*dma_supported_op)(struct device *hwdev, u64 mask);
- int is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
+extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
-#define dma_alloc_coherent(dev, size, handle, gfp) \
- platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
+extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
+extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
-/* coherent mem. is cheap */
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *daddr, gfp_t gfp)
{
- return dma_alloc_coherent(dev, size, dma_handle, flag);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->alloc_coherent(dev, size, daddr, gfp);
}
-#define dma_free_coherent platform_dma_free_coherent
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *caddr, dma_addr_t daddr)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->free_coherent(dev, size, caddr, daddr);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev,
+ void *caddr, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->map_page(dev, virt_to_page(caddr),
+ (unsigned long)caddr & ~PAGE_MASK, size,
+ dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->unmap_page(dev, daddr, size, dir, attrs);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->map_sg(dev, sgl, nents, dir, attrs);
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->unmap_sg(dev, sgl, nents, dir, attrs);
+}
+
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
+ size_t size,
+ enum dma_data_direction dir)
{
- dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_single_for_cpu(dev, daddr, size, dir);
}
-#define dma_map_single_attrs platform_dma_map_single_attrs
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, int dir)
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
{
- return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}
-#define dma_map_sg_attrs platform_dma_map_sg_attrs
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, int dir)
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t daddr,
+ size_t size,
+ enum dma_data_direction dir)
{
- return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_single_for_device(dev, daddr, size, dir);
}
-#define dma_unmap_single_attrs platform_dma_unmap_single_attrs
-static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
- size_t size, int dir)
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl,
+ int nents,
+ enum dma_data_direction dir)
{
- return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ ops->sync_sg_for_device(dev, sgl, nents, dir);
}
-#define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, int dir)
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->mapping_error(dev, daddr);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
{
- return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->map_page(dev, page, offset, size, dir, NULL);
}
-#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
-#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
-#define dma_sync_single_for_device platform_dma_sync_single_for_device
-#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
-#define dma_mapping_error platform_dma_mapping_error
-#define dma_map_page(dev, pg, off, size, dir) \
- dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir) \
- dma_unmap_single(dev, dma_addr, size, dir)
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_unmap_single(dev, addr, size, dir);
+}
/*
* Rest of this file is part of the "Advanced DMA API". Use at your own risk.
@@ -115,7 +144,11 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
dma_sync_single_for_device(dev, dma_handle, size, dir)
-#define dma_supported platform_dma_supported
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->dma_supported(dev, mask);
+}
static inline int
dma_set_mask (struct device *dev, u64 mask)
@@ -141,11 +174,4 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
- return dma_ops;
-}
-
-
-
#endif /* _ASM_IA64_DMA_MAPPING_H */
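
From a driver's point of view nothing changes: the calls below look identical under the old machvec macros and the new inline wrappers; only the dispatch underneath moved to the per-device dma_map_ops table. A hedged usage sketch ("demo" names are hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int demo_dma_io(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, daddr))   /* ops->mapping_error() */
                    return -EIO;

            /* ... start the transfer and wait for completion ... */

            dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);
            return 0;
    }
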
diff --git a/arch/ia64/include/asm/fpu.h b/arch/ia64/include/asm/fpu.h
index 3859558ff0a..0c26157cffa 100644
--- a/arch/ia64/include/asm/fpu.h
+++ b/arch/ia64/include/asm/fpu.h
@@ -6,8 +6,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <asm/types.h>
-
/* floating point status register: */
#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */
#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
index 0f5b5592175..c2c5fd8fcac 100644
--- a/arch/ia64/include/asm/gcc_intrin.h
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -6,6 +6,7 @@
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*/
+#include <linux/types.h>
#include <linux/compiler.h>
/* define this macro to get some asm stmts included in 'c' files */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index a3e44a5ed49..111ed522289 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -10,6 +10,7 @@
#ifndef __ASSEMBLY__
+#include <linux/types.h>
/* include compiler specific intrinsics */
#include <asm/ia64regs.h>
#ifdef __INTEL_COMPILER
@@ -201,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
#ifndef __ASSEMBLY__
#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
-#define IA64_INTRINSIC_API(name) pv_cpu_ops.name
+#ifdef ASM_SUPPORTED
+# define IA64_INTRINSIC_API(name) paravirt_ ## name
+#else
+# define IA64_INTRINSIC_API(name) pv_cpu_ops.name
+#endif
#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name
#else
#define IA64_INTRINSIC_API(name) ia64_native_ ## name
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index bfa86b6af7c..18a7e49abbc 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -21,8 +21,7 @@
*
*/
-#include <asm/types.h>
-
+#include <linux/types.h>
#include <linux/ioctl.h>
/* Select x86 specific features in <linux/kvm.h> */
@@ -166,7 +165,40 @@ struct saved_vpd {
unsigned long vcpuid[5];
unsigned long vpsr;
unsigned long vpr;
- unsigned long vcr[128];
+ union {
+ unsigned long vcr[128];
+ struct {
+ unsigned long dcr;
+ unsigned long itm;
+ unsigned long iva;
+ unsigned long rsv1[5];
+ unsigned long pta;
+ unsigned long rsv2[7];
+ unsigned long ipsr;
+ unsigned long isr;
+ unsigned long rsv3;
+ unsigned long iip;
+ unsigned long ifa;
+ unsigned long itir;
+ unsigned long iipa;
+ unsigned long ifs;
+ unsigned long iim;
+ unsigned long iha;
+ unsigned long rsv4[38];
+ unsigned long lid;
+ unsigned long ivr;
+ unsigned long tpr;
+ unsigned long eoi;
+ unsigned long irr[4];
+ unsigned long itv;
+ unsigned long pmv;
+ unsigned long cmcv;
+ unsigned long rsv5[5];
+ unsigned long lrr0;
+ unsigned long lrr1;
+ unsigned long rsv6[46];
+ };
+ };
};
struct kvm_regs {
@@ -214,4 +246,18 @@ struct kvm_sregs {
struct kvm_fpu {
};
+#define KVM_IA64_VCPU_STACK_SHIFT 16
+#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT)
+
+struct kvm_ia64_vcpu_stack {
+ unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
#endif
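
The anonymous union overlays the named control registers on vcr[], so each field must land on its architectural CR slot (lid is CR64, lrr1 is CR81, and the reserved pads bring the struct to exactly 128 longs). An assumed compile-time check, not part of the patch, that would pin the layout down:

    #include <linux/kernel.h>
    #include <linux/stddef.h>

    static inline void saved_vpd_layout_check(void)
    {
            /* named-CR overlay must line up with the raw vcr[] slots */
            BUILD_BUG_ON(offsetof(struct saved_vpd, lid) !=
                         offsetof(struct saved_vpd, vcr[64]));
            BUILD_BUG_ON(offsetof(struct saved_vpd, lrr1) !=
                         offsetof(struct saved_vpd, vcr[81]));
    }
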
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 34866366165..4542651e6ac 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -112,7 +112,11 @@
#define VCPU_STRUCT_SHIFT 16
#define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)
-#define KVM_STK_OFFSET VCPU_STRUCT_SIZE
+/*
+ * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} arch/ia64/include/asm/kvm.h
+ */
+#define KVM_STK_SHIFT 16
+#define KVM_STK_OFFSET (__IA64_UL_CONST(1)<< KVM_STK_SHIFT)
#define KVM_VM_STRUCT_SHIFT 19
#define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)
@@ -153,10 +157,10 @@ struct kvm_vm_data {
struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};
-#define VCPU_BASE(n) KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, vcpu_data[n])
-#define VM_BASE KVM_VM_DATA_BASE + \
- offsetof(struct kvm_vm_data, kvm_vm_struct)
+#define VCPU_BASE(n) (KVM_VM_DATA_BASE + \
+ offsetof(struct kvm_vm_data, vcpu_data[n]))
+#define KVM_VM_BASE (KVM_VM_DATA_BASE + \
+ offsetof(struct kvm_vm_data, kvm_vm_struct))
#define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \
offsetof(struct kvm_vm_data, kvm_mem_dirty_log)
@@ -235,8 +239,6 @@ struct kvm_vm_data {
struct kvm;
struct kvm_vcpu;
-struct kvm_guest_debug{
-};
struct kvm_mmio_req {
uint64_t addr; /* physical address */
@@ -462,6 +464,8 @@ struct kvm_arch {
unsigned long metaphysical_rr4;
unsigned long vmm_init_rr;
+ int online_vcpus;
+
struct kvm_ioapic *vioapic;
struct kvm_vm_stat stat;
struct kvm_sal_data rdv_sal_data;
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index fe87b212170..367d299d993 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -11,7 +11,6 @@
#define _ASM_IA64_MACHVEC_H
#include <linux/types.h>
-#include <linux/swiotlb.h>
/* forward declarations: */
struct device;
@@ -45,24 +44,8 @@ typedef void ia64_mv_kernel_launch_event_t(void);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
-typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
-typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
-typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
-typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
-typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
-typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
-typedef int ia64_mv_dma_supported (struct device *, u64);
-
-typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
-typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
-typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
/*
* WARNING: The legacy I/O space is _architected_. Platforms are
@@ -114,8 +97,6 @@ machvec_noop_bus (struct pci_bus *bus)
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
-extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
-extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
extern void machvec_tlb_migrate_finish (struct mm_struct *);
# if defined (CONFIG_IA64_HP_SIM)
@@ -148,19 +129,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
# define platform_dma_init ia64_mv.dma_init
-# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent
-# define platform_dma_free_coherent ia64_mv.dma_free_coherent
-# define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs
-# define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs
-# define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs
-# define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs
-# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
-# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
-# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
-# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device
-# define platform_dma_mapping_error ia64_mv.dma_mapping_error
-# define platform_dma_supported ia64_mv.dma_supported
# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
+# define platform_dma_get_ops ia64_mv.dma_get_ops
# define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
@@ -203,19 +173,8 @@ struct ia64_machine_vector {
ia64_mv_global_tlb_purge_t *global_tlb_purge;
ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
ia64_mv_dma_init *dma_init;
- ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
- ia64_mv_dma_free_coherent *dma_free_coherent;
- ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
- ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
- ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
- ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
- ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
- ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
- ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
- ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
- ia64_mv_dma_mapping_error *dma_mapping_error;
- ia64_mv_dma_supported *dma_supported;
ia64_mv_dma_get_required_mask *dma_get_required_mask;
+ ia64_mv_dma_get_ops *dma_get_ops;
ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq;
ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
@@ -254,19 +213,8 @@ struct ia64_machine_vector {
platform_global_tlb_purge, \
platform_tlb_migrate_finish, \
platform_dma_init, \
- platform_dma_alloc_coherent, \
- platform_dma_free_coherent, \
- platform_dma_map_single_attrs, \
- platform_dma_unmap_single_attrs, \
- platform_dma_map_sg_attrs, \
- platform_dma_unmap_sg_attrs, \
- platform_dma_sync_single_for_cpu, \
- platform_dma_sync_sg_for_cpu, \
- platform_dma_sync_single_for_device, \
- platform_dma_sync_sg_for_device, \
- platform_dma_mapping_error, \
- platform_dma_supported, \
platform_dma_get_required_mask, \
+ platform_dma_get_ops, \
platform_irq_to_vector, \
platform_local_vector_to_irq, \
platform_pci_get_legacy_mem, \
@@ -302,6 +250,9 @@ extern void machvec_init_from_cmdline(const char *cmdline);
# error Unknown configuration. Update arch/ia64/include/asm/machvec.h.
# endif /* CONFIG_IA64_GENERIC */
+extern void swiotlb_dma_init(void);
+extern struct dma_map_ops *dma_get_ops(struct device *);
+
/*
* Define default versions so we can extend machvec for new platforms without having
* to update the machvec files for all existing platforms.
@@ -332,43 +283,10 @@ extern void machvec_init_from_cmdline(const char *cmdline);
# define platform_kernel_launch_event machvec_noop
#endif
#ifndef platform_dma_init
-# define platform_dma_init swiotlb_init
-#endif
-#ifndef platform_dma_alloc_coherent
-# define platform_dma_alloc_coherent swiotlb_alloc_coherent
-#endif
-#ifndef platform_dma_free_coherent
-# define platform_dma_free_coherent swiotlb_free_coherent
-#endif
-#ifndef platform_dma_map_single_attrs
-# define platform_dma_map_single_attrs swiotlb_map_single_attrs
-#endif
-#ifndef platform_dma_unmap_single_attrs
-# define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs
-#endif
-#ifndef platform_dma_map_sg_attrs
-# define platform_dma_map_sg_attrs swiotlb_map_sg_attrs
-#endif
-#ifndef platform_dma_unmap_sg_attrs
-# define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs
-#endif
-#ifndef platform_dma_sync_single_for_cpu
-# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu
-#endif
-#ifndef platform_dma_sync_sg_for_cpu
-# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu
-#endif
-#ifndef platform_dma_sync_single_for_device
-# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device
-#endif
-#ifndef platform_dma_sync_sg_for_device
-# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device
-#endif
-#ifndef platform_dma_mapping_error
-# define platform_dma_mapping_error swiotlb_dma_mapping_error
+# define platform_dma_init swiotlb_dma_init
#endif
-#ifndef platform_dma_supported
-# define platform_dma_supported swiotlb_dma_supported
+#ifndef platform_dma_get_ops
+# define platform_dma_get_ops dma_get_ops
#endif
#ifndef platform_dma_get_required_mask
# define platform_dma_get_required_mask ia64_dma_get_required_mask
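
After this change the machvec carries a single DMA hook, dma_get_ops(), whose generic fallback simply hands back the global ops table that a platform's dma_init hook installed. Its assumed shape, consistent with the declarations above (in this series it lives in arch/ia64/kernel/dma-mapping.c):

    #include <linux/dma-mapping.h>
    #include <linux/module.h>

    struct dma_map_ops *dma_ops;
    EXPORT_SYMBOL(dma_ops);

    struct dma_map_ops *dma_get_ops(struct device *dev)
    {
            /* set by sba_dma_init(), swiotlb_dma_init(), ... */
            return dma_ops;
    }
    EXPORT_SYMBOL(dma_get_ops);
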
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 3400b561e71..6ab1de5c45e 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -2,14 +2,6 @@
#define _ASM_IA64_MACHVEC_DIG_VTD_h
extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
-extern ia64_mv_dma_free_coherent vtd_free_coherent;
-extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
-extern ia64_mv_dma_supported iommu_dma_supported;
-extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
extern ia64_mv_dma_init pci_iommu_alloc;
/*
@@ -22,17 +14,5 @@ extern ia64_mv_dma_init pci_iommu_alloc;
#define platform_name "dig_vtd"
#define platform_setup dig_setup
#define platform_dma_init pci_iommu_alloc
-#define platform_dma_alloc_coherent vtd_alloc_coherent
-#define platform_dma_free_coherent vtd_free_coherent
-#define platform_dma_map_single_attrs vtd_map_single_attrs
-#define platform_dma_unmap_single_attrs vtd_unmap_single_attrs
-#define platform_dma_map_sg_attrs vtd_map_sg_attrs
-#define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
-#define platform_dma_supported iommu_dma_supported
-#define platform_dma_mapping_error vtd_dma_mapping_error
#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 2f57f5144b9..3bd83d78a41 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -2,14 +2,7 @@
#define _ASM_IA64_MACHVEC_HPZX1_h
extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
-extern ia64_mv_dma_free_coherent sba_free_coherent;
-extern ia64_mv_dma_map_single_attrs sba_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs;
-extern ia64_mv_dma_supported sba_dma_supported;
-extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
+extern ia64_mv_dma_init sba_dma_init;
/*
* This stuff has dual use!
@@ -20,18 +13,6 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
*/
#define platform_name "hpzx1"
#define platform_setup dig_setup
-#define platform_dma_init machvec_noop
-#define platform_dma_alloc_coherent sba_alloc_coherent
-#define platform_dma_free_coherent sba_free_coherent
-#define platform_dma_map_single_attrs sba_map_single_attrs
-#define platform_dma_unmap_single_attrs sba_unmap_single_attrs
-#define platform_dma_map_sg_attrs sba_map_sg_attrs
-#define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
-#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
-#define platform_dma_sync_single_for_device machvec_dma_sync_single
-#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
-#define platform_dma_supported sba_dma_supported
-#define platform_dma_mapping_error sba_dma_mapping_error
+#define platform_dma_init sba_dma_init
#endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index a842cdda827..1091ac39740 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -2,18 +2,7 @@
#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h
extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent;
-extern ia64_mv_dma_free_coherent hwsw_free_coherent;
-extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs;
-extern ia64_mv_dma_supported hwsw_dma_supported;
-extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error;
-extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
+extern ia64_mv_dma_get_ops hwsw_dma_get_ops;
/*
* This stuff has dual use!
@@ -23,20 +12,8 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device;
* the macros are used directly.
*/
#define platform_name "hpzx1_swiotlb"
-
#define platform_setup dig_setup
#define platform_dma_init machvec_noop
-#define platform_dma_alloc_coherent hwsw_alloc_coherent
-#define platform_dma_free_coherent hwsw_free_coherent
-#define platform_dma_map_single_attrs hwsw_map_single_attrs
-#define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs
-#define platform_dma_map_sg_attrs hwsw_map_sg_attrs
-#define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs
-#define platform_dma_supported hwsw_dma_supported
-#define platform_dma_mapping_error hwsw_dma_mapping_error
-#define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device hwsw_sync_single_for_device
-#define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device
+#define platform_dma_get_ops hwsw_dma_get_ops
#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f1a6e0d6dfa..f061a30aac4 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,19 +55,8 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
extern ia64_mv_readw_t __sn_readw_relaxed;
extern ia64_mv_readl_t __sn_readl_relaxed;
extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent;
-extern ia64_mv_dma_free_coherent sn_dma_free_coherent;
-extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs;
-extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs;
-extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs;
-extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs;
-extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
-extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu;
-extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
-extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
-extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
-extern ia64_mv_dma_supported sn_dma_supported;
extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
+extern ia64_mv_dma_init sn_dma_init;
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
@@ -111,20 +100,8 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
#define platform_pci_legacy_read sn_pci_legacy_read
#define platform_pci_legacy_write sn_pci_legacy_write
-#define platform_dma_init machvec_noop
-#define platform_dma_alloc_coherent sn_dma_alloc_coherent
-#define platform_dma_free_coherent sn_dma_free_coherent
-#define platform_dma_map_single_attrs sn_dma_map_single_attrs
-#define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs
-#define platform_dma_map_sg_attrs sn_dma_map_sg_attrs
-#define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs
-#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
-#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu
-#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
-#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device
-#define platform_dma_mapping_error sn_dma_mapping_error
-#define platform_dma_supported sn_dma_supported
#define platform_dma_get_required_mask sn_dma_get_required_mask
+#define platform_dma_init sn_dma_init
#define platform_migrate sn_migrate
#define platform_kernel_launch_event sn_kernel_launch_event
#ifdef CONFIG_PCI_MSI
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 040bc87db93..7f2a456603c 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
/* re-check, now that we've got the lock: */
context = mm->context;
if (context == 0) {
- cpus_clear(mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
if (ia64_ctx.next >= ia64_ctx.limit) {
ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
do {
context = get_mmu_context(mm);
- if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
- cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
reload_context(context);
/*
* in the unlikely event of a TLB-flush by another thread,
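
The hunks above are a mechanical conversion from direct cpu_vm_mask field access to the generic mm_cpumask() accessor. An equivalence sketch (the helper name is assumed):

    #include <linux/cpumask.h>
    #include <linux/mm_types.h>

    static inline void demo_mark_cpu(struct mm_struct *mm, int cpu)
    {
            /* old: cpu_set(cpu, mm->cpu_vm_mask);
             * new: go through the accessor, which hides the mask layout */
            if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
                    cpumask_set_cpu(cpu, mm_cpumask(mm));
    }
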
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index d2da61e4c49..908eaef42a0 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -16,6 +16,12 @@ struct mod_arch_specific {
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
+#ifdef CONFIG_PARAVIRT
+ struct elf64_shdr *paravirt_bundles;
+ /* paravirt_alt_bundle_patch table */
+ struct elf64_shdr *paravirt_insts;
+ /* paravirt_alt_inst_patch table */
+#endif
unsigned long gp; /* global-pointer for module */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
diff --git a/arch/ia64/include/asm/msidef.h b/arch/ia64/include/asm/msidef.h
new file mode 100644
index 00000000000..592c1047a0c
--- /dev/null
+++ b/arch/ia64/include/asm/msidef.h
@@ -0,0 +1,42 @@
+#ifndef _IA64_MSI_DEF_H
+#define _IA64_MSI_DEF_H
+
+/*
+ * Shifts for APIC-based data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK 0xffffff00
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT 8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT 15
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for APIC-based bus address
+ */
+
+#define MSI_ADDR_DEST_ID_SHIFT 4
+#define MSI_ADDR_HEADER 0xfee00000
+
+#define MSI_ADDR_DEST_ID_MASK 0xfff0000f
+#define MSI_ADDR_DEST_ID_CPU(cpu) ((cpu) << MSI_ADDR_DEST_ID_SHIFT)
+
+#define MSI_ADDR_DEST_MODE_SHIFT 2
+#define MSI_ADDR_DEST_MODE_PHYS (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_LOGIC (1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#endif /* _IA64_MSI_DEF_H */
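
Putting the new header to use: an assumed sketch, mirroring what arch/ia64/kernel/msi_ia64.c does with these masks, composing the MSI address/data pair for a fixed-delivery, edge-triggered vector aimed at one CPU:

    #include <linux/types.h>
    #include <asm/msidef.h>

    static void demo_compose_msi(u32 dest_phys_id, u8 vector,
                                 u32 *addr, u32 *data)
    {
            /* address selects the target CPU and addressing mode */
            *addr = MSI_ADDR_HEADER |
                    MSI_ADDR_DEST_MODE_PHYS |
                    MSI_ADDR_REDIRECTION_CPU |
                    MSI_ADDR_DEST_ID_CPU(dest_phys_id);

            /* data selects the vector and delivery semantics */
            *data = MSI_DATA_TRIGGER_EDGE |
                    MSI_DATA_LEVEL_ASSERT |
                    MSI_DATA_DELIVERY_FIXED |
                    MSI_DATA_VECTOR(vector);
    }
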
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index 0a1026cca4f..d2d46efb3e6 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -30,6 +30,9 @@
#define __paravirt_work_processed_syscall_target \
ia64_work_processed_syscall
+#define paravirt_fsyscall_table ia64_native_fsyscall_table
+#define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down
+
#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
# define PARAVIRT_POISON 0xdeadbeefbaadf00d
# define CLOBBER(clob) \
@@ -74,6 +77,11 @@
(pred) mov reg = psr \
CLOBBER(clob)
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+(pred) mov reg = ar.itc \
+ CLOBBER(clob) \
+ CLOBBER_PRED(pred_clob)
+
#define MOV_TO_IFA(reg, clob) \
mov cr.ifa = reg \
CLOBBER(clob)
@@ -158,6 +166,11 @@
#define RSM_PSR_DT \
rsm psr.dt
+#define RSM_PSR_BE_I(clob0, clob1) \
+ rsm psr.be | psr.i \
+ CLOBBER(clob0) \
+ CLOBBER(clob1)
+
#define SSM_PSR_DT_AND_SRLZ_I \
ssm psr.dt \
;; \
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 00000000000..be16ca9311b
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/native/inst.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist \
+ __ia64_native_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist \
+ __ia64_native_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
+ __ia64_native_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
+ __ia64_native_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist \
+ __ia64_native_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist \
+ __ia64_native_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist \
+ __ia64_native_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist \
+ __ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
index b8e6eb1090d..8d72962ec83 100644
--- a/arch/ia64/include/asm/native/pvchk_inst.h
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -180,6 +180,11 @@
IS_PRED_IN(pred) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+ IS_PRED_IN(pred) \
+ IS_PRED_CLOB(pred_clob) \
+ IS_RREG_OUT(reg) \
+ IS_RREG_CLOB(clob)
#define MOV_TO_IFA(reg, clob) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
@@ -246,6 +251,9 @@
IS_RREG_CLOB(clob2)
#define RSM_PSR_DT \
nop 0
+#define RSM_PSR_BE_I(clob0, clob1) \
+ IS_RREG_CLOB(clob0) \
+ IS_RREG_CLOB(clob1)
#define SSM_PSR_DT_AND_SRLZ_I \
nop 0
#define BSW_0(clob0, clob1, clob2) \
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2bf3636473f..2eb0a981a09 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -22,6 +22,56 @@
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
+#ifndef __ASSEMBLY__
+/******************************************************************************
+ * fsys related addresses
+ */
+struct pv_fsys_data {
+ unsigned long *fsyscall_table;
+ void *fsys_bubble_down;
+};
+
+extern struct pv_fsys_data pv_fsys_data;
+
+unsigned long *paravirt_get_fsyscall_table(void);
+char *paravirt_get_fsys_bubble_down(void);
+
+/******************************************************************************
+ * patchlist addresses for gate page
+ */
+enum pv_gate_patchlist {
+ PV_GATE_START_FSYSCALL,
+ PV_GATE_END_FSYSCALL,
+
+ PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
+ PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
+
+ PV_GATE_START_VTOP,
+ PV_GATE_END_VTOP,
+
+ PV_GATE_START_MCKINLEY_E9,
+ PV_GATE_END_MCKINLEY_E9,
+};
+
+struct pv_patchdata {
+ unsigned long start_fsyscall_patchlist;
+ unsigned long end_fsyscall_patchlist;
+ unsigned long start_brl_fsys_bubble_down_patchlist;
+ unsigned long end_brl_fsys_bubble_down_patchlist;
+ unsigned long start_vtop_patchlist;
+ unsigned long end_vtop_patchlist;
+ unsigned long start_mckinley_e9_patchlist;
+ unsigned long end_mckinley_e9_patchlist;
+
+ void *gate_section;
+};
+
+extern struct pv_patchdata pv_patchdata;
+
+unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
+void *paravirt_get_gate_section(void);
+#endif
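The accessors declared above are thin wrappers around the two data blobs; a minimal sketch of the likely definitions (the real ones live in this series' paravirt.c and paravirt_patchlist.c):

unsigned long *
paravirt_get_fsyscall_table(void)
{
	return pv_fsys_data.fsyscall_table;
}

char *
paravirt_get_fsys_bubble_down(void)
{
	return pv_fsys_data.fsys_bubble_down;
}

void *
paravirt_get_gate_section(void)
{
	return pv_patchdata.gate_section;
}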
+
#ifdef CONFIG_PARAVIRT_GUEST
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
@@ -68,6 +118,14 @@ struct pv_init_ops {
int (*arch_setup_nomca)(void);
void (*post_smp_prepare_boot_cpu)(void);
+
+#ifdef ASM_SUPPORTED
+ unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
+ unsigned long type);
+ unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
+ unsigned long type);
+#endif
+ void (*patch_branch)(unsigned long tag, unsigned long type);
};
extern struct pv_init_ops pv_init_ops;
@@ -210,6 +268,8 @@ struct pv_time_ops {
int (*do_steal_accounting)(unsigned long *new_itm);
void (*clocksource_resume)(void);
+
+ unsigned long long (*sched_clock)(void);
};
extern struct pv_time_ops pv_time_ops;
@@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
return pv_time_ops.do_steal_accounting(new_itm);
}
+static inline unsigned long long paravirt_sched_clock(void)
+{
+ return pv_time_ops.sched_clock();
+}
+
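For the native flavor, the new hook can plausibly be wired to the ITC-based clock declared below in timex.h (sketch only; any other handlers this series installs in pv_time_ops are omitted):

struct pv_time_ops pv_time_ops = {
	.sched_clock = ia64_native_sched_clock,	/* ITC-based native clock */
};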
#endif /* !__ASSEMBLY__ */
#else
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h
new file mode 100644
index 00000000000..128ff5db6e6
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt_patch.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __ASM_PARAVIRT_PATCH_H
+#define __ASM_PARAVIRT_PATCH_H
+
+#ifdef __ASSEMBLY__
+
+ .section .paravirt_branches, "a"
+ .previous
+#define PARAVIRT_PATCH_SITE_BR(type) \
+ { \
+ [1:] ; \
+ br.cond.sptk.many 2f ; \
+ nop.b 0 ; \
+ nop.b 0;; ; \
+ } ; \
+ 2: \
+ .xdata8 ".paravirt_branches", 1b, type
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/intrinsics.h>
+
+/* for binary patch */
+struct paravirt_patch_site_bundle {
+ void *sbundle;
+ void *ebundle;
+ unsigned long type;
+};
+
+/* the label marks the beginning of a new bundle */
+#define paravirt_alt_bundle(instr, privop) \
+ "\t998:\n" \
+ "\t" instr "\n" \
+ "\t999:\n" \
+ "\t.pushsection .paravirt_bundles, \"a\"\n" \
+ "\t.popsection\n" \
+ "\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
+ __stringify(privop) "\n"
+
+
+struct paravirt_patch_bundle_elem {
+ const void *sbundle;
+ const void *ebundle;
+ unsigned long type;
+};
+
+
+struct paravirt_patch_site_inst {
+ unsigned long stag;
+ unsigned long etag;
+ unsigned long type;
+};
+
+#define paravirt_alt_inst(instr, privop) \
+ "\t[998:]\n" \
+ "\t" instr "\n" \
+ "\t[999:]\n" \
+ "\t.pushsection .paravirt_insts, \"a\"\n" \
+ "\t.popsection\n" \
+ "\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
+ __stringify(privop) "\n"
+
+struct paravirt_patch_site_branch {
+ unsigned long tag;
+ unsigned long type;
+};
+
+struct paravirt_patch_branch_target {
+ const void *entry;
+ unsigned long type;
+};
+
+void
+__paravirt_patch_apply_branch(
+ unsigned long tag, unsigned long type,
+ const struct paravirt_patch_branch_target *entries,
+ unsigned int nr_entries);
+
+void
+paravirt_patch_reloc_br(unsigned long tag, const void *target);
+
+void
+paravirt_patch_reloc_brl(unsigned long tag, const void *target);
+
+
+#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
+unsigned long
+ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+
+unsigned long
+__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
+ const struct paravirt_patch_bundle_elem *elems,
+ unsigned long nelems,
+ const struct paravirt_patch_bundle_elem **found);
+
+void
+paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
+ const struct paravirt_patch_site_bundle *end);
+
+void
+paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
+ const struct paravirt_patch_site_inst *end);
+
+void paravirt_patch_apply(void);
+#else
+#define paravirt_patch_apply_bundle(start, end) do { } while (0)
+#define paravirt_patch_apply_inst(start, end) do { } while (0)
+#define paravirt_patch_apply() do { } while (0)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PARAVIRT_PATCH_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
+ * End:
+ */
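A sketch of how a pv flavor can feed branch targets to __paravirt_patch_apply_branch(), keyed by the PARAVIRT_PATCH_TYPE_BR_* constants from paravirt_privop.h below (the table contents and the extern declaration are illustrative, not taken from this patch):

extern void ia64_native_switch_to(void);	/* asm entry point */

static const struct paravirt_patch_branch_target example_targets[] = {
	{ &ia64_native_switch_to, PARAVIRT_PATCH_TYPE_BR_SWITCH_TO },
};

static void
example_patch_branch(unsigned long tag, unsigned long type)
{
	__paravirt_patch_apply_branch(tag, type, example_targets,
				      ARRAY_SIZE(example_targets));
}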
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 33c8e55f577..3d2951130b5 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -33,7 +33,7 @@
*/
struct pv_cpu_ops {
- void (*fc)(unsigned long addr);
+ void (*fc)(void *addr);
unsigned long (*thash)(unsigned long addr);
unsigned long (*get_cpuid)(int index);
unsigned long (*get_pmd)(int index);
@@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum);
/* Instructions paravirtualized for performance */
/************************************************/
+#ifndef ASM_SUPPORTED
+#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
+#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
+#define __paravirt_getreg() pv_cpu_ops.getreg()
+#endif
+
/* mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
 * a static inline function can't satisfy that. */
#define paravirt_ssm(mask) \
do { \
if ((mask) == IA64_PSR_I) \
- pv_cpu_ops.ssm_i(); \
+ paravirt_ssm_i(); \
else \
ia64_native_ssm(mask); \
} while (0)
@@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
#define paravirt_rsm(mask) \
do { \
if ((mask) == IA64_PSR_I) \
- pv_cpu_ops.rsm_i(); \
+ paravirt_rsm_i(); \
else \
ia64_native_rsm(mask); \
} while (0)
@@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
if ((reg) == _IA64_REG_IP) \
res = ia64_native_getreg(_IA64_REG_IP); \
else \
- res = pv_cpu_ops.getreg(reg); \
+ res = __paravirt_getreg(reg); \
res; \
})
@@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
#endif /* CONFIG_PARAVIRT */
+#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
+#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
+#else
+#define paravirt_dv_serialize_data() /* nothing */
+#endif
+
/* these routines utilize privilege-sensitive or performance-sensitive
* privileged instructions so the code must be replaced with
* paravirtualized versions */
@@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
+
+#if defined(CONFIG_PARAVIRT)
+/******************************************************************************
+ * binary patching infrastructure
+ */
+#define PARAVIRT_PATCH_TYPE_FC 1
+#define PARAVIRT_PATCH_TYPE_THASH 2
+#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
+#define PARAVIRT_PATCH_TYPE_GET_PMD 4
+#define PARAVIRT_PATCH_TYPE_PTCGA 5
+#define PARAVIRT_PATCH_TYPE_GET_RR 6
+#define PARAVIRT_PATCH_TYPE_SET_RR 7
+#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
+#define PARAVIRT_PATCH_TYPE_SSM_I 9
+#define PARAVIRT_PATCH_TYPE_RSM_I 10
+#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
+#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
+
+/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
+#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
+#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
+
+/*
+ * struct task_struct* (*ia64_switch_to)(void* next_task);
+ * void *ia64_leave_syscall;
+ * void *ia64_work_processed_syscall
+ * void *ia64_leave_kernel;
+ */
+
+#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
+#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
+ (PARAVIRT_PATCH_TYPE_BR_START + 0)
+#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
+ (PARAVIRT_PATCH_TYPE_BR_START + 1)
+#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
+ (PARAVIRT_PATCH_TYPE_BR_START + 2)
+#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
+ (PARAVIRT_PATCH_TYPE_BR_START + 3)
+
+#ifdef ASM_SUPPORTED
+#include <asm/paravirt_patch.h>
+
+/*
+ * pv_cpu_ops calling stub.
+ * The normal function-call convention can't be expressed with gcc
+ * inline assembly.
+ *
+ * From the caller's point of view,
+ * the following registers will be clobbered:
+ * r2, r3
+ * r8-r15
+ * r16, r17
+ * b6, b7
+ * p6-p15
+ * ar.ccv
+ *
+ * From the callee's point of view,
+ * the following registers can be used:
+ * r2, r3: scratch
+ * r8: scratch, input argument0 and return value
+ * r9-r15: scratch, input argument1-5
+ * b6: return pointer
+ * b7: scratch
+ * p6-p15: scratch
+ * ar.ccv: scratch
+ *
+ * Other registers must not be changed. In particular,
+ * b0: rp: preserved; gcc ignores b0 in the clobber list.
+ * r16: saved gp
+ */
+/* 5 bundles */
+#define __PARAVIRT_BR \
+ ";;\n" \
+ "{ .mlx\n" \
+ "nop 0\n" \
+	"movl r2 = %[op_addr]\n" /* get function pointer address */ \
+ ";;\n" \
+ "}\n" \
+ "1:\n" \
+ "{ .mii\n" \
+ "ld8 r2 = [r2]\n" /* load function descriptor address */ \
+ "mov r17 = ip\n" /* get ip to calc return address */ \
+ "mov r16 = gp\n" /* save gp */ \
+ ";;\n" \
+ "}\n" \
+ "{ .mii\n" \
+ "ld8 r3 = [r2], 8\n" /* load entry address */ \
+ "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
+ ";;\n" \
+ "mov b7 = r3\n" /* set entry address */ \
+ "}\n" \
+ "{ .mib\n" \
+ "ld8 gp = [r2]\n" /* load gp value */ \
+ "mov b6 = r17\n" /* set return address */ \
+	"br.cond.sptk.few b7\n"	/* intrinsics are very short insns */ \
+ "}\n" \
+ "1:\n" \
+ "{ .mii\n" \
+ "mov gp = r16\n" /* restore gp value */ \
+ "nop 0\n" \
+ "nop 0\n" \
+ ";;\n" \
+ "}\n"
+
+#define PARAVIRT_OP(op) \
+ [op_addr] "i"(&pv_cpu_ops.op)
+
+#define PARAVIRT_TYPE(type) \
+ PARAVIRT_PATCH_TYPE_ ## type
+
+#define PARAVIRT_REG_CLOBBERS0 \
+ "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
+ "r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS1 \
+	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
+ "r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS2 \
+ "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \
+ "r15", "r16", "r17"
+
+#define PARAVIRT_REG_CLOBBERS5 \
+ "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \
+ "r15", "r16", "r17"
+
+#define PARAVIRT_BR_CLOBBERS \
+ "b6", "b7"
+
+#define PARAVIRT_PR_CLOBBERS \
+ "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
+
+#define PARAVIRT_AR_CLOBBERS \
+ "ar.ccv"
+
+#define PARAVIRT_CLOBBERS0 \
+ PARAVIRT_REG_CLOBBERS0, \
+ PARAVIRT_BR_CLOBBERS, \
+ PARAVIRT_PR_CLOBBERS, \
+ PARAVIRT_AR_CLOBBERS, \
+ "memory"
+
+#define PARAVIRT_CLOBBERS1 \
+ PARAVIRT_REG_CLOBBERS1, \
+ PARAVIRT_BR_CLOBBERS, \
+ PARAVIRT_PR_CLOBBERS, \
+ PARAVIRT_AR_CLOBBERS, \
+ "memory"
+
+#define PARAVIRT_CLOBBERS2 \
+ PARAVIRT_REG_CLOBBERS2, \
+ PARAVIRT_BR_CLOBBERS, \
+ PARAVIRT_PR_CLOBBERS, \
+ PARAVIRT_AR_CLOBBERS, \
+ "memory"
+
+#define PARAVIRT_CLOBBERS5 \
+ PARAVIRT_REG_CLOBBERS5, \
+ PARAVIRT_BR_CLOBBERS, \
+ PARAVIRT_PR_CLOBBERS, \
+ PARAVIRT_AR_CLOBBERS, \
+ "memory"
+
+#define PARAVIRT_BR0(op, type) \
+ register unsigned long ia64_clobber asm ("r8"); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(type)) \
+ : "=r"(ia64_clobber) \
+ : PARAVIRT_OP(op) \
+ : PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR0_RET(op, type) \
+ register unsigned long ia64_intri_res asm ("r8"); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(type)) \
+ : "=r"(ia64_intri_res) \
+ : PARAVIRT_OP(op) \
+ : PARAVIRT_CLOBBERS0)
+
+#define PARAVIRT_BR1(op, type, arg1) \
+ register unsigned long __##arg1 asm ("r8") = arg1; \
+ register unsigned long ia64_clobber asm ("r8"); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(type)) \
+ : "=r"(ia64_clobber) \
+ : PARAVIRT_OP(op), "0"(__##arg1) \
+ : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_RET(op, type, arg1) \
+ register unsigned long ia64_intri_res asm ("r8"); \
+ register unsigned long __##arg1 asm ("r8") = arg1; \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(type)) \
+ : "=r"(ia64_intri_res) \
+ : PARAVIRT_OP(op), "0"(__##arg1) \
+ : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR1_VOID(op, type, arg1) \
+ register void *__##arg1 asm ("r8") = arg1; \
+ register unsigned long ia64_clobber asm ("r8"); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(type)) \
+ : "=r"(ia64_clobber) \
+ : PARAVIRT_OP(op), "0"(__##arg1) \
+ : PARAVIRT_CLOBBERS1)
+
+#define PARAVIRT_BR2(op, type, arg1, arg2) \
+ register unsigned long __##arg1 asm ("r8") = arg1; \
+ register unsigned long __##arg2 asm ("r9") = arg2; \
+ register unsigned long ia64_clobber1 asm ("r8"); \
+ register unsigned long ia64_clobber2 asm ("r9"); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(type)) \
+ : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
+ : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
+ : PARAVIRT_CLOBBERS2)
+
+
+#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
+ static inline void \
+ paravirt_ ## op (void) \
+ { \
+ PARAVIRT_BR0(op, type); \
+ }
+
+#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
+ static inline unsigned long \
+ paravirt_ ## op (void) \
+ { \
+ PARAVIRT_BR0_RET(op, type); \
+ return ia64_intri_res; \
+ }
+
+#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \
+ static inline void \
+ paravirt_ ## op (void *arg1) \
+ { \
+ PARAVIRT_BR1_VOID(op, type, arg1); \
+ }
+
+#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
+ static inline void \
+ paravirt_ ## op (unsigned long arg1) \
+ { \
+ PARAVIRT_BR1(op, type, arg1); \
+ }
+
+#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
+ static inline unsigned long \
+ paravirt_ ## op (unsigned long arg1) \
+ { \
+ PARAVIRT_BR1_RET(op, type, arg1); \
+ return ia64_intri_res; \
+ }
+
+#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
+ static inline void \
+ paravirt_ ## op (unsigned long arg1, \
+ unsigned long arg2) \
+ { \
+ PARAVIRT_BR2(op, type, arg1, arg2); \
+ }
+
+
+PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
+PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
+PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
+PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
+PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
+PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
+PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
+PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
+PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
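For concreteness, the PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) line above expands to:

static inline unsigned long
paravirt_thash(unsigned long arg1)
{
	/* branches through pv_cpu_ops.thash via the __PARAVIRT_BR stub,
	 * passing arg1 and taking the result in r8 */
	PARAVIRT_BR1_RET(thash, THASH);
	return ia64_intri_res;
}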
+
+static inline void
+paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3,
+ unsigned long val4)
+{
+ register unsigned long __val0 asm ("r8") = val0;
+ register unsigned long __val1 asm ("r9") = val1;
+ register unsigned long __val2 asm ("r10") = val2;
+ register unsigned long __val3 asm ("r11") = val3;
+ register unsigned long __val4 asm ("r14") = val4;
+
+ register unsigned long ia64_clobber0 asm ("r8");
+ register unsigned long ia64_clobber1 asm ("r9");
+ register unsigned long ia64_clobber2 asm ("r10");
+ register unsigned long ia64_clobber3 asm ("r11");
+ register unsigned long ia64_clobber4 asm ("r14");
+
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
+ PARAVIRT_TYPE(SET_RR0_TO_RR4))
+ : "=r"(ia64_clobber0),
+ "=r"(ia64_clobber1),
+ "=r"(ia64_clobber2),
+ "=r"(ia64_clobber3),
+ "=r"(ia64_clobber4)
+ : PARAVIRT_OP(set_rr0_to_rr4),
+ "0"(__val0), "1"(__val1), "2"(__val2),
+ "3"(__val3), "4"(__val4)
+ : PARAVIRT_CLOBBERS5);
+}
+
+/* unsigned long paravirt_getreg(int reg) */
+#define __paravirt_getreg(reg) \
+ ({ \
+ register unsigned long ia64_intri_res asm ("r8"); \
+ register unsigned long __reg asm ("r8") = (reg); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(reg)); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(GETREG) \
+ + (reg)) \
+ : "=r"(ia64_intri_res) \
+ : PARAVIRT_OP(getreg), "0"(__reg) \
+ : PARAVIRT_CLOBBERS1); \
+ \
+ ia64_intri_res; \
+ })
+
+/* void paravirt_setreg(int reg, unsigned long val) */
+#define paravirt_setreg(reg, val) \
+ do { \
+ register unsigned long __val asm ("r8") = val; \
+ register unsigned long __reg asm ("r9") = reg; \
+ register unsigned long ia64_clobber1 asm ("r8"); \
+ register unsigned long ia64_clobber2 asm ("r9"); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(reg)); \
+ asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
+ PARAVIRT_TYPE(SETREG) \
+ + (reg)) \
+ : "=r"(ia64_clobber1), \
+ "=r"(ia64_clobber2) \
+ : PARAVIRT_OP(setreg), \
+ "1"(__reg), "0"(__val) \
+ : PARAVIRT_CLOBBERS2); \
+ } while (0)
+
+#endif /* ASM_SUPPORTED */
+#endif /* CONFIG_PARAVIRT */
+
#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 77f30b664b4..30cf46534dd 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);
#else /* ! SMP */
-#define PER_CPU_ATTRIBUTES __attribute__((__section__(".data.percpu")))
-
#define per_cpu_init() (__phys_per_cpu_start)
#endif /* SMP */
+#define PER_CPU_BASE_SECTION ".data.percpu"
+
/*
* Be extremely careful when taking the address of this variable! Due to virtual
* remapping, it is different from the canonical address returned by __get_cpu_var(var)!
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 21c402365d0..59840833625 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *);
extern int is_multithreading_enabled(void);
extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
#else /* CONFIG_SMP */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index d5ef0aa3e31..745421225ec 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -63,4 +63,7 @@
#define SO_MARK 36
+#define SO_TIMESTAMPING 37
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 0229fb95fb3..13ab71576bc 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -120,6 +120,38 @@ do { \
#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#ifdef ASM_SUPPORTED
+
+static __always_inline void
+__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__ (
+ "tbit.nz p6, p0 = %1,%2\n"
+ "br.few 3f\n"
+ "1:\n"
+ "fetchadd4.rel r2 = [%0], -1;;\n"
+ "(p6) ssm psr.i\n"
+ "2:\n"
+ "hint @pause\n"
+ "ld4 r2 = [%0];;\n"
+ "cmp4.lt p7,p0 = r2, r0\n"
+ "(p7) br.cond.spnt.few 2b\n"
+ "(p6) rsm psr.i\n"
+ ";;\n"
+ "3:\n"
+ "fetchadd4.acq r2 = [%0], 1;;\n"
+ "cmp4.lt p7,p0 = r2, r0\n"
+ "(p7) br.cond.spnt.few 1b\n"
+ : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+ : "p6", "p7", "r2", "memory");
+}
+
+#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
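A sketch of the caller pattern the _flags variant serves (not part of this patch): the irqsave path disables interrupts and hands the saved flags down, so that the wait loop above can briefly re-enable them, via the tbit test on IA64_PSR_I_BIT, while spinning:

static inline void
example_read_lock_irqsave(raw_rwlock_t *lock, unsigned long *flags)
{
	local_irq_save(*flags);		/* old psr.i captured in *flags */
	__raw_read_lock_flags(lock, *flags);
}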
+
+#else /* !ASM_SUPPORTED */
+
+#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+
#define __raw_read_lock(rw) \
do { \
raw_rwlock_t *__read_lock_ptr = (rw); \
@@ -131,6 +163,8 @@ do { \
} \
} while (0)
+#endif /* !ASM_SUPPORTED */
+
#define __raw_read_unlock(rw) \
do { \
raw_rwlock_t *__read_lock_ptr = (rw); \
@@ -138,20 +172,33 @@ do { \
} while (0)
#ifdef ASM_SUPPORTED
-#define __raw_write_lock(rw) \
-do { \
- __asm__ __volatile__ ( \
- "mov ar.ccv = r0\n" \
- "dep r29 = -1, r0, 31, 1;;\n" \
- "1:\n" \
- "ld4 r2 = [%0];;\n" \
- "cmp4.eq p0,p7 = r0,r2\n" \
- "(p7) br.cond.spnt.few 1b \n" \
- "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
- "cmp4.eq p0,p7 = r0, r2\n" \
- "(p7) br.cond.spnt.few 1b;;\n" \
- :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
-} while(0)
+
+static __always_inline void
+__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__ (
+ "tbit.nz p6, p0 = %1, %2\n"
+ "mov ar.ccv = r0\n"
+ "dep r29 = -1, r0, 31, 1\n"
+ "br.few 3f;;\n"
+ "1:\n"
+ "(p6) ssm psr.i\n"
+ "2:\n"
+ "hint @pause\n"
+ "ld4 r2 = [%0];;\n"
+ "cmp4.eq p0,p7 = r0, r2\n"
+ "(p7) br.cond.spnt.few 2b\n"
+ "(p6) rsm psr.i\n"
+ ";;\n"
+ "3:\n"
+ "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
+ "cmp4.eq p0,p7 = r0, r2\n"
+ "(p7) br.cond.spnt.few 1b;;\n"
+ : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
+ : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
+}
+
+#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
#define __raw_write_trylock(rw) \
({ \
@@ -174,6 +221,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#else /* !ASM_SUPPORTED */
+#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+
#define __raw_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
diff --git a/arch/ia64/include/asm/swab.h b/arch/ia64/include/asm/swab.h
index 6aa58b699ee..c89a8cb5d8a 100644
--- a/arch/ia64/include/asm/swab.h
+++ b/arch/ia64/include/asm/swab.h
@@ -6,7 +6,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
*/
-#include <asm/types.h>
+#include <linux/types.h>
#include <asm/intrinsics.h>
#include <linux/compiler.h>
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 4e03cfe74a0..86c7db86118 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -40,5 +40,6 @@ get_cycles (void)
}
extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);
#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 32f3af1641c..7b4c8c70b2d 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -44,11 +44,6 @@
#define parent_node(nid) (nid)
/*
- * Returns the number of the first CPU on Node 'node'.
- */
-#define node_to_first_cpu(node) (cpumask_first(cpumask_of_node(node)))
-
-/*
* Determines the node for a given pci bus
*/
#define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
@@ -84,7 +79,7 @@ void build_cpu_to_node_map(void);
.child = NULL, \
.groups = NULL, \
.min_interval = 8, \
- .max_interval = 8*(min(num_online_cpus(), 32)), \
+ .max_interval = 8*(min(num_online_cpus(), 32U)), \
.busy_factor = 64, \
.imbalance_pct = 125, \
.cache_nice_tries = 2, \
@@ -117,11 +112,6 @@ void build_cpu_to_node_map(void);
extern void arch_fix_phys_package_id(int num, u32 slot);
-#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
- CPU_MASK_ALL : \
- node_to_cpumask(pcibus_to_node(bus)) \
- )
-
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
new file mode 100644
index 00000000000..61b5bdfd980
--- /dev/null
+++ b/arch/ia64/include/asm/uv/uv.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/system.h>
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+ /* temporary support for running on hardware simulator */
+ return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif /* _ASM_IA64_UV_UV_H */
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index f607018af4a..53e9dfacd07 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -305,5 +305,11 @@ static inline int uv_num_possible_blades(void)
return 1;
}
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+ /* not currently needed on ia64 */
+}
+
+
#endif /* __ASM_IA64_UV_HUB__ */
diff --git a/arch/ia64/include/asm/uv/uv_mmrs.h b/arch/ia64/include/asm/uv/uv_mmrs.h
index c149ef08543..fe0b8f05e1a 100644
--- a/arch/ia64/include/asm/uv/uv_mmrs.h
+++ b/arch/ia64/include/asm/uv/uv_mmrs.h
@@ -8,8 +8,8 @@
* Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
*/
-#ifndef __ASM_IA64_UV_MMRS__
-#define __ASM_IA64_UV_MMRS__
+#ifndef _ASM_IA64_UV_UV_MMRS_H
+#define _ASM_IA64_UV_UV_MMRS_H
#define UV_MMR_ENABLE (1UL << 63)
@@ -243,6 +243,158 @@ union uvh_event_occurred0_u {
#define UVH_EVENT_OCCURRED0_ALIAS_32 0x005f0
/* ========================================================================= */
+/* UVH_GR0_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_int0_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_int1_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int0_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int1_config_s {
+ unsigned long vector_ : 8; /* RW */
+ unsigned long dm : 3; /* RW */
+ unsigned long destmode : 1; /* RW */
+ unsigned long status : 1; /* RO */
+ unsigned long p : 1; /* RO */
+ unsigned long rsvd_14 : 1; /* */
+ unsigned long t : 1; /* RO */
+ unsigned long m : 1; /* RW */
+ unsigned long rsvd_17_31: 15; /* */
+ unsigned long apic_id : 32; /* RW */
+ } s;
+};
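Hedged usage sketch for the four config unions above (it assumes the existing uv_write_global_mmr64() MMR accessor; pnode, vector and apicid are illustrative parameters):

static void example_set_gr0_tlb_int0(int pnode, int vector, int apicid)
{
	union uvh_gr0_tlb_int0_config_u cfg;

	cfg.v = 0;
	cfg.s.vector_ = vector;	/* interrupt vector to deliver */
	cfg.s.apic_id = apicid;	/* destination APIC id */
	uv_write_global_mmr64(pnode, UVH_GR0_TLB_INT0_CONFIG, cfg.v);
}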
+
+/* ========================================================================= */
/* UVH_INT_CMPB */
/* ========================================================================= */
#define UVH_INT_CMPB 0x22080UL
@@ -670,4 +822,4 @@ union uvh_si_alias2_overlay_config_u {
};
-#endif /* __ASM_IA64_UV_MMRS__ */
+#endif /* _ASM_IA64_UV_UV_MMRS_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 7a804e80fc6..e425227a418 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -33,9 +33,6 @@
#ifndef _ASM_IA64_XEN_HYPERVISOR_H
#define _ASM_IA64_XEN_HYPERVISOR_H
-#ifdef CONFIG_XEN
-
-#include <linux/init.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to compile xen-netfront.c */
@@ -43,22 +40,32 @@
/* xen_domain_type is set before executing any C code by early_xen_setup */
enum xen_domain_type {
- XEN_NATIVE,
- XEN_PV_DOMAIN,
- XEN_HVM_DOMAIN,
+ XEN_NATIVE, /* running on bare hardware */
+ XEN_PV_DOMAIN, /* running in a PV domain */
+	XEN_HVM_DOMAIN,	/* running in a Xen HVM domain */
};
+#ifdef CONFIG_XEN
extern enum xen_domain_type xen_domain_type;
+#else
+#define xen_domain_type XEN_NATIVE
+#endif
#define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
-#define xen_initial_domain() (xen_pv_domain() && \
+#define xen_pv_domain() (xen_domain() && \
+ xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain() (xen_domain() && \
+ xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#define xen_initial_domain() (xen_pv_domain() && \
(xen_start_info->flags & SIF_INITDOMAIN))
-#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
+#else
+#define xen_initial_domain() (0)
+#endif
-/* deprecated. remove this */
-#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
+#ifdef CONFIG_XEN
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
@@ -74,16 +81,6 @@ void force_evtchn_callback(void);
/* For setup_arch() in arch/ia64/kernel/setup.c */
void xen_ia64_enable_opt_feature(void);
-
-#else /* CONFIG_XEN */
-
-#define xen_domain() (0)
-#define xen_pv_domain() (0)
-#define xen_initial_domain() (0)
-#define xen_hvm_domain() (0)
-#define is_running_on_xen() (0) /* deprecated. remove this */
#endif
-#define is_initial_xendomain() (0) /* deprecated. remove this */
-
#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
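A typical consumer pattern for the reworked predicates (sketch; the call is the opt-feature hook declared above, the function name is illustrative):

static void example_setup(void)
{
	if (!xen_pv_domain())
		return;		/* bare metal or HVM: nothing to do */
	xen_ia64_enable_opt_feature();
}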
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
index 19c2ae1d878..c53a4761120 100644
--- a/arch/ia64/include/asm/xen/inst.h
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -33,6 +33,9 @@
#define __paravirt_work_processed_syscall_target \
xen_work_processed_syscall
+#define paravirt_fsyscall_table xen_fsyscall_table
+#define paravirt_fsys_bubble_down xen_fsys_bubble_down
+
#define MOV_FROM_IFA(reg) \
movl reg = XSI_IFA; \
;; \
@@ -110,6 +113,27 @@
.endm
#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
+/* assuming ar.itc is read with interrupts disabled. */
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
+(pred) movl clob = XSI_ITC_OFFSET; \
+ ;; \
+(pred) ld8 clob = [clob]; \
+(pred) mov reg = ar.itc; \
+ ;; \
+(pred) add reg = reg, clob; \
+ ;; \
+(pred) movl clob = XSI_ITC_LAST; \
+ ;; \
+(pred) ld8 clob = [clob]; \
+ ;; \
+(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
+ ;; \
+(pred_clob) add reg = 1, clob; \
+ ;; \
+(pred) movl clob = XSI_ITC_LAST; \
+ ;; \
+(pred) st8 [clob] = reg
+
#define MOV_TO_IFA(reg, clob) \
movl clob = XSI_IFA; \
@@ -362,6 +386,10 @@
#define RSM_PSR_DT \
XEN_HYPER_RSM_PSR_DT
+#define RSM_PSR_BE_I(clob0, clob1) \
+ RSM_PSR_I(p0, clob0, clob1); \
+ rum psr.be
+
#define SSM_PSR_DT_AND_SRLZ_I \
XEN_HYPER_SSM_PSR_DT
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index f00fab40854..e951e740bdf 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -209,6 +209,15 @@ struct mapped_regs {
unsigned long krs[8]; /* kernel registers */
unsigned long tmp[16]; /* temp registers
(e.g. for hyperprivops) */
+
+ /* itc paravirtualization
+ * vAR.ITC = mAR.ITC + itc_offset
+	 * itc_last is the last value passed to
+	 * the guest OS, kept to prevent the
+	 * virtual ITC from going backwards.
+ */
+ unsigned long itc_offset;
+ unsigned long itc_last;
};
};
};
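A C rendering of the MOV_FROM_ITC logic that consumes these two fields (sketch only; the authoritative version is the assembly macro in xen/inst.h above): read ar.itc, add itc_offset, and clamp against itc_last so the virtual ITC never moves backwards:

static unsigned long example_xen_read_itc(struct mapped_regs *regs)
{
	unsigned long now;

	/* vAR.ITC = mAR.ITC + itc_offset */
	now = ia64_getreg(_IA64_REG_AR_ITC) + regs->itc_offset;
	if (now <= regs->itc_last)	/* never hand back a smaller value */
		now = regs->itc_last + 1;
	regs->itc_last = now;
	return now;
}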
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index 4d92d9bbda7..c57fa910f2c 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,3 +1,12 @@
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define XEN_ACCOUNT_GET_STAMP \
+ MOV_FROM_ITC(pUStk, p6, r20, r2);
+#else
+#define XEN_ACCOUNT_GET_STAMP
+#endif
+
/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
@@ -123,7 +132,7 @@
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
- ACCOUNT_GET_STAMP \
+ XEN_ACCOUNT_GET_STAMP \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h
new file mode 100644
index 00000000000..eae944e8884
--- /dev/null
+++ b/arch/ia64/include/asm/xen/patchlist.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ * arch/ia64/include/asm/xen/patchlist.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define __paravirt_start_gate_fsyscall_patchlist \
+ __xen_start_gate_fsyscall_patchlist
+#define __paravirt_end_gate_fsyscall_patchlist \
+ __xen_end_gate_fsyscall_patchlist
+#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
+ __xen_start_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
+ __xen_end_gate_brl_fsys_bubble_down_patchlist
+#define __paravirt_start_gate_vtop_patchlist \
+ __xen_start_gate_vtop_patchlist
+#define __paravirt_end_gate_vtop_patchlist \
+ __xen_end_gate_vtop_patchlist
+#define __paravirt_start_gate_mckinley_e9_patchlist \
+ __xen_start_gate_mckinley_e9_patchlist
+#define __paravirt_end_gate_mckinley_e9_patchlist \
+ __xen_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
index 71ec7546e10..fb4ec5e0b06 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -55,6 +55,8 @@
#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
+#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS)
+#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS)
#endif
#ifndef __ASSEMBLY__
@@ -67,7 +69,7 @@
* may have different semantics depending on whether they are executed
* at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
* be allowed to execute directly, lest incorrect semantics result. */
-extern void xen_fc(unsigned long addr);
+extern void xen_fc(void *addr);
extern unsigned long xen_thash(unsigned long addr);
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
@@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
extern unsigned long xen_get_cpuid(int index);
extern unsigned long xen_get_pmd(int index);
+#ifndef ASM_SUPPORTED
extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
+#endif
/************************************************/
/* Instructions paravirtualized for performance */
@@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
#define xen_get_virtual_pend() \
(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
+#ifndef ASM_SUPPORTED
/* Although all privileged operations can be left to trap and will
* be properly handled by Xen, some are frequent enough that we use
* hyperprivops for performance. */
@@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
unsigned long val4);
extern void xen_set_kr(unsigned long index, unsigned long val);
extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index c381ea95489..5628e9a990a 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -5,9 +5,9 @@
extra-y := head.o init_task.o vmlinux.lds
obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
- irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
+ irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
- unwind.o mca.o mca_asm.o topology.o
+ unwind.o mca.o mca_asm.o topology.o dma-mapping.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
@@ -36,46 +36,23 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \
+ paravirt_patch.o
obj-$(CONFIG_IA64_ESI) += esi.o
ifneq ($(CONFIG_IA64_ESI),)
obj-y += esi_stub.o # must be in kernel proper
endif
obj-$(CONFIG_DMAR) += pci-dma.o
-ifeq ($(CONFIG_DMAR), y)
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
-endif
-
-# The gate DSO image is built using a special linker script.
-targets += gate.so gate-syms.o
-
-extra-y += gate.so gate-syms.o gate.lds gate.o
# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
-CPPFLAGS_gate.lds := -P -C -U$(ARCH)
-
-quiet_cmd_gate = GATE $@
- cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
-
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
-$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
- $(call if_changed,gate)
-
-$(obj)/built-in.o: $(obj)/gate-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
-
-GATECFLAGS_gate-syms.o = -r
-$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
- $(call if_changed,gate)
-
-# gate-data.o contains the gate DSO image as data in section .data.gate.
-# We must build gate.so before we can assemble it.
-# Note: kbuild does not track this dependency due to usage of .incbin
-$(obj)/gate-data.o: $(obj)/gate.so
+# The gate DSO image is built using a special linker script.
+include $(srctree)/arch/ia64/kernel/Makefile.gate
+# tell the gate page that it is compiled for native
+CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
define sed-y
@@ -111,9 +88,9 @@ include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
#
-# native ivt.S and entry.S
+# native ivt.S, entry.S and fsys.S
#
-ASM_PARAVIRT_OBJS = ivt.o entry.o
+ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
define paravirtualized_native
AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
new file mode 100644
index 00000000000..1d87f84069b
--- /dev/null
+++ b/arch/ia64/kernel/Makefile.gate
@@ -0,0 +1,27 @@
+# The gate DSO image is built using a special linker script.
+
+targets += gate.so gate-syms.o
+
+extra-y += gate.so gate-syms.o gate.lds gate.o
+
+CPPFLAGS_gate.lds := -P -C -U$(ARCH)
+
+quiet_cmd_gate = GATE $@
+ cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
+$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
+ $(call if_changed,gate)
+
+$(obj)/built-in.o: $(obj)/gate-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
+
+GATECFLAGS_gate-syms.o = -r
+$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
+ $(call if_changed,gate)
+
+# gate-data.o contains the gate DSO image as data in section .data.gate.
+# We must build gate.so before we can assemble it.
+# Note: kbuild does not track this dependency due to usage of .incbin
+$(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d541671caf4..5510317db37 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
return __va(phys_addr);
}
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
/* --------------------------------------------------------------------------
Boot-time Table Parsing
-------------------------------------------------------------------------- */
@@ -886,7 +890,7 @@ __init void prefill_possible_map(void)
possible, max((possible - available_cpus), 0));
for (i = 0; i < possible; i++)
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
}
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
@@ -924,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
buffer.length = ACPI_ALLOCATE_BUFFER;
buffer.pointer = NULL;
- cpus_complement(tmp_map, cpu_present_map);
- cpu = first_cpu(tmp_map);
- if (cpu >= NR_CPUS)
+ cpumask_complement(&tmp_map, cpu_present_mask);
+ cpu = cpumask_first(&tmp_map);
+ if (cpu >= nr_cpu_ids)
return -EINVAL;
acpi_map_cpu2node(handle, cpu, physid);
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 742dbb1d5a4..af565016904 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -316,5 +316,7 @@ void foo(void)
DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
+ DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
+ DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
#endif /* CONFIG_XEN */
}
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
new file mode 100644
index 00000000000..086a2aeb040
--- /dev/null
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -0,0 +1,13 @@
+#include <linux/dma-mapping.h>
+
+/* Set this to 1 if there is a HW IOMMU in the system */
+int iommu_detected __read_mostly;
+
+struct dma_map_ops *dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
+struct dma_map_ops *dma_get_ops(struct device *dev)
+{
+ return dma_ops;
+}
+EXPORT_SYMBOL(dma_get_ops);
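Callers are expected to fetch the ops through dma_get_ops() rather than touching dma_ops directly; a sketch of the pattern (function name illustrative, assuming the alloc_coherent member of the dma_map_ops this series uses):

static void *example_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp)
{
	struct dma_map_ops *ops = dma_get_ops(dev);

	return ops->alloc_coherent(dev, size, handle, gfp);
}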
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index efaff15d8cf..7ef80e8161c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -456,6 +456,7 @@ efi_map_pal_code (void)
GRANULEROUNDDOWN((unsigned long) pal_vaddr),
pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
IA64_GRANULE_SHIFT);
+ paravirt_dv_serialize_data();
ia64_set_psr(psr); /* restore psr */
}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e5341e2c117..ccfdeee9d89 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -735,7 +735,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
__paravirt_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
adds r2=PT(LOADRS)+16,r12
-(pUStk) mov.m r22=ar.itc // fetch time at leave
+ MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
;;
(p6) ld4 r31=[r18] // load current_thread_info()->flags
@@ -984,7 +984,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
.pred.rel.mutex pUStk,pKStk
MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
-(pUStk) mov.m r22=ar.itc // M fetch time at leave
+ MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
nop.i 0
;;
#else
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c1625c7e177..3567d54f8ce 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -25,6 +25,7 @@
#include <asm/unistd.h>
#include "entry.h"
+#include "paravirt_inst.h"
/*
* See Documentation/ia64/fsys.txt for details on fsyscalls.
@@ -279,7 +280,7 @@ ENTRY(fsys_gettimeofday)
(p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
;;
.pred.rel.mutex p8,p9
-(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
+ MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!!
(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
(p13) ld8 r25 = [r19] // get itc_lastcycle value
ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
@@ -418,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
;;
- rsm psr.i // mask interrupt delivery
+ RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
mov ar.ccv=0
andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
@@ -491,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
#ifdef CONFIG_SMP
st4.rel [r31]=r0 // release the lock
#endif
- ssm psr.i
+ SSM_PSR_I(p0, p9, r31)
;;
srlz.d // ensure psr.i is set again
@@ -513,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
#ifdef CONFIG_SMP
st4.rel [r31]=r0 // release the lock
#endif
- ssm psr.i
+ SSM_PSR_I(p0, p9, r17)
;;
srlz.d
br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall
@@ -521,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
#ifdef CONFIG_SMP
.lock_contention:
/* Rather than spinning here, fall back on doing a heavy-weight syscall. */
- ssm psr.i
+ SSM_PSR_I(p0, p9, r17)
;;
srlz.d
br.sptk.many fsys_fallback_syscall
@@ -592,17 +593,17 @@ ENTRY(fsys_fallback_syscall)
adds r17=-1024,r15
movl r14=sys_call_table
;;
- rsm psr.i
+ RSM_PSR_I(p0, r26, r27)
shladd r18=r17,3,r14
;;
ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
- mov r29=psr // read psr (12 cyc load latency)
+ MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency)
mov r27=ar.rsc
mov r21=ar.fpsr
mov r26=ar.pfs
END(fsys_fallback_syscall)
/* FALL THROUGH */
-GLOBAL_ENTRY(fsys_bubble_down)
+GLOBAL_ENTRY(paravirt_fsys_bubble_down)
.prologue
.altrp b6
.body
@@ -640,7 +641,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
*
* PSR.BE : already is turned off in __kernel_syscall_via_epc()
* PSR.AC : don't care (kernel normally turns PSR.AC on)
- * PSR.I : already turned off by the time fsys_bubble_down gets
+ * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets
* invoked
* PSR.DFL: always 0 (kernel never turns it on)
* PSR.DFH: don't care --- kernel never touches f32-f127 on its own
@@ -650,7 +651,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
* PSR.DB : don't care --- kernel never enables kernel-level
* breakpoints
* PSR.TB : must be 0 already; if it wasn't zero on entry to
- * __kernel_syscall_via_epc, the branch to fsys_bubble_down
+ * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down
* will trigger a taken branch; the taken-trap-handler then
* converts the syscall into a break-based system-call.
*/
@@ -683,7 +684,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
;;
mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- mov.m r30=ar.itc // M get cycle for accounting
+ MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting
#else
nop.m 0
#endif
@@ -734,21 +735,21 @@ GLOBAL_ENTRY(fsys_bubble_down)
mov rp=r14 // I0 set the real return addr
and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
;;
- ssm psr.i // M2 we're on kernel stacks now, reenable irqs
+ SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs
cmp.eq p8,p0=r3,r0 // A
(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
nop.m 0
(p8) br.call.sptk.many b6=b6 // B (ignore return address)
br.cond.spnt ia64_trace_syscall // B
-END(fsys_bubble_down)
+END(paravirt_fsys_bubble_down)
.rodata
.align 8
- .globl fsyscall_table
+ .globl paravirt_fsyscall_table
- data8 fsys_bubble_down
-fsyscall_table:
+ data8 paravirt_fsys_bubble_down
+paravirt_fsyscall_table:
data8 fsys_ni_syscall
data8 0 // exit // 1025
data8 0 // read
@@ -1033,4 +1034,4 @@ fsyscall_table:
// fill in zeros for the remaining entries
.zero:
- .space fsyscall_table + 8*NR_syscalls - .zero, 0
+ .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 74b1ccce4e8..cf5e0a105e1 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -13,6 +13,7 @@
#include <asm/sigcontext.h>
#include <asm/system.h>
#include <asm/unistd.h>
+#include "paravirt_inst.h"
/*
* We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
@@ -48,87 +49,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break)
}
END(__kernel_syscall_via_break)
-/*
- * On entry:
- * r11 = saved ar.pfs
- * r15 = system call #
- * b0 = saved return address
- * b6 = return address
- * On exit:
- * r11 = saved ar.pfs
- * r15 = system call #
- * b0 = saved return address
- * all other "scratch" registers: undefined
- * all "preserved" registers: same as on entry
- */
-
-GLOBAL_ENTRY(__kernel_syscall_via_epc)
- .prologue
- .altrp b6
- .body
-{
- /*
- * Note: the kernel cannot assume that the first two instructions in this
- * bundle get executed. The remaining code must be safe even if
- * they do not get executed.
- */
- adds r17=-1024,r15 // A
- mov r10=0 // A default to successful syscall execution
- epc // B causes split-issue
-}
- ;;
- rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
- LOAD_FSYSCALL_TABLE(r14) // X
- ;;
- mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
- shladd r18=r17,3,r14 // A
- mov r19=NR_syscalls-1 // A
- ;;
- lfetch [r18] // M0|1
- mov r29=psr // M2 (12 cyc)
- // If r17 is a NaT, p6 will be zero
- cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
- ;;
- mov r21=ar.fpsr // M2 (12 cyc)
- tnat.nz p10,p9=r15 // I0
- mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
- ;;
- srlz.d // M0 (forces split-issue) ensure PSR.BE==0
-(p6) ld8 r18=[r18] // M0|1
- nop.i 0
- ;;
- nop.m 0
-(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
- nop.i 0
- ;;
-(p8) ssm psr.i
-(p6) mov b7=r18 // I0
-(p8) br.dptk.many b7 // B
-
- mov r27=ar.rsc // M2 (12 cyc)
-/*
- * brl.cond doesn't work as intended because the linker would convert this branch
- * into a branch to a PLT. Perhaps there will be a way to avoid this with some
- * future version of the linker. In the meantime, we just use an indirect branch
- * instead.
- */
-#ifdef CONFIG_ITANIUM
-(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
- ;;
-(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
- ;;
-(p6) mov b7=r14
-(p6) br.sptk.many b7
-#else
- BRL_COND_FSYS_BUBBLE_DOWN(p6)
-#endif
- ssm psr.i
- mov r10=-1
-(p10) mov r8=EINVAL
-(p9) mov r8=ENOSYS
- FSYS_RETURN
-END(__kernel_syscall_via_epc)
-
# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET)
@@ -374,3 +294,92 @@ restore_rbs:
// invala not necessary as that will happen when returning to user-mode
br.cond.sptk back_from_restore_rbs
END(__kernel_sigtramp)
+
+/*
+ * On entry:
+ * r11 = saved ar.pfs
+ * r15 = system call #
+ * b0 = saved return address
+ * b6 = return address
+ * On exit:
+ * r11 = saved ar.pfs
+ * r15 = system call #
+ * b0 = saved return address
+ * all other "scratch" registers: undefined
+ * all "preserved" registers: same as on entry
+ */
+
+GLOBAL_ENTRY(__kernel_syscall_via_epc)
+ .prologue
+ .altrp b6
+ .body
+{
+ /*
+ * Note: the kernel cannot assume that the first two instructions in this
+ * bundle get executed. The remaining code must be safe even if
+ * they do not get executed.
+ */
+ adds r17=-1024,r15 // A
+ mov r10=0 // A default to successful syscall execution
+ epc // B causes split-issue
+}
+ ;;
+ RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d)
+ LOAD_FSYSCALL_TABLE(r14) // X
+ ;;
+ mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
+ shladd r18=r17,3,r14 // A
+ mov r19=NR_syscalls-1 // A
+ ;;
+ lfetch [r18] // M0|1
+ MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc)
+ // If r17 is a NaT, p6 will be zero
+ cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
+ ;;
+ mov r21=ar.fpsr // M2 (12 cyc)
+ tnat.nz p10,p9=r15 // I0
+ mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
+ ;;
+ srlz.d // M0 (forces split-issue) ensure PSR.BE==0
+(p6) ld8 r18=[r18] // M0|1
+ nop.i 0
+ ;;
+ nop.m 0
+(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
+ nop.i 0
+ ;;
+ SSM_PSR_I(p8, p14, r25)
+(p6) mov b7=r18 // I0
+(p8) br.dptk.many b7 // B
+
+ mov r27=ar.rsc // M2 (12 cyc)
+/*
+ * brl.cond doesn't work as intended because the linker would convert this branch
+ * into a branch to a PLT. Perhaps there will be a way to avoid this with some
+ * future version of the linker. In the meantime, we just use an indirect branch
+ * instead.
+ */
+#ifdef CONFIG_ITANIUM
+(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
+ ;;
+(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
+ ;;
+(p6) mov b7=r14
+(p6) br.sptk.many b7
+#else
+ BRL_COND_FSYS_BUBBLE_DOWN(p6)
+#endif
+ SSM_PSR_I(p0, p14, r10)
+ mov r10=-1
+(p10) mov r8=EINVAL
+(p9) mov r8=ENOSYS
+ FSYS_RETURN
+
+#ifdef CONFIG_PARAVIRT
+ /*
+	 * pad to make the size of this symbol constant
+ * independent of paravirtualization.
+ */
+ .align PAGE_SIZE / 8
+#endif
+END(__kernel_syscall_via_epc)
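
__kernel_syscall_via_epc moves to the end of the gate page and, under CONFIG_PARAVIRT, is padded to a fixed size; apart from the RSM/SSM/MOV_FROM_PSR paravirt macros its instructions are unchanged. The syscall-number check is the part worth slowing down for: `adds r17=-1024,r15` followed by the single unsigned compare `cmp.geu p6,p7=r19,r17` validates both bounds at once, and a NaT in r15 falls out as p6 clear. A standalone sketch of that range test:

#include <stdio.h>

#define NR_SYSCALLS 280			/* illustrative value */

/* mirrors: adds r17=-1024,r15 ; cmp.geu p6,p7=r19,r17, r19=NR_syscalls-1 */
static int fsyscall_in_range(unsigned long sysnr)
{
	return (sysnr - 1024) <= NR_SYSCALLS - 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       fsyscall_in_range(1023),			/* 0: below the window */
	       fsyscall_in_range(1025),			/* 1: inside */
	       fsyscall_in_range(1024 + NR_SYSCALLS));	/* 0: above */
	return 0;
}
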
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 3cb1abc00e2..88c64ed47c3 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -7,6 +7,7 @@
#include <asm/system.h>
+#include "paravirt_patchlist.h"
SECTIONS
{
@@ -33,21 +34,21 @@ SECTIONS
. = GATE_ADDR + 0x600;
.data.patch : {
- __start_gate_mckinley_e9_patchlist = .;
+ __paravirt_start_gate_mckinley_e9_patchlist = .;
*(.data.patch.mckinley_e9)
- __end_gate_mckinley_e9_patchlist = .;
+ __paravirt_end_gate_mckinley_e9_patchlist = .;
- __start_gate_vtop_patchlist = .;
+ __paravirt_start_gate_vtop_patchlist = .;
*(.data.patch.vtop)
- __end_gate_vtop_patchlist = .;
+ __paravirt_end_gate_vtop_patchlist = .;
- __start_gate_fsyscall_patchlist = .;
+ __paravirt_start_gate_fsyscall_patchlist = .;
*(.data.patch.fsyscall_table)
- __end_gate_fsyscall_patchlist = .;
+ __paravirt_end_gate_fsyscall_patchlist = .;
- __start_gate_brl_fsys_bubble_down_patchlist = .;
+ __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
*(.data.patch.brl_fsys_bubble_down)
- __end_gate_brl_fsys_bubble_down_patchlist = .;
+ __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
} :readable
.IA_64.unwind_info : { *(.IA_64.unwind_info*) }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 59301c47280..23f846de62d 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1050,7 +1050,7 @@ END(ia64_delay_loop)
* except that the multiplication and the shift are done with 128-bit
* intermediate precision so that we can produce a full 64-bit result.
*/
-GLOBAL_ENTRY(sched_clock)
+GLOBAL_ENTRY(ia64_native_sched_clock)
addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;;
@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock)
;;
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp
-END(sched_clock)
+END(ia64_native_sched_clock)
+#ifndef CONFIG_PARAVIRT
+ //unsigned long long
+ //sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
+ .global sched_clock
+sched_clock = ia64_native_sched_clock
+#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
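
sched_clock becomes ia64_native_sched_clock, with a plain assembler alias standing in when CONFIG_PARAVIRT is off so a hypervisor can interpose its own clock. The routine itself, per the comment above it, scales ar.itc by a fixed-point nsec-per-cycle factor using 128-bit intermediate precision. A sketch of that computation using GCC's unsigned __int128 (the shift constant's name matches the assembly, but its value here is a placeholder):

#include <stdint.h>
#include <stdio.h>

#define IA64_NSEC_PER_CYC_SHIFT 30	/* placeholder value */

static uint64_t cycles_to_ns(uint64_t itc, uint64_t nsec_per_cyc)
{
	unsigned __int128 prod = (unsigned __int128)itc * nsec_per_cyc;
	return (uint64_t)(prod >> IA64_NSEC_PER_CYC_SHIFT);
}

int main(void)
{
	/* a 1 GHz clock would make nsec/cyc exactly 1.0 in fixed point */
	uint64_t nsec_per_cyc = 1ULL << IA64_NSEC_PER_CYC_SHIFT;

	printf("%llu ns\n",
	       (unsigned long long)cycles_to_ns(123456789, nsec_per_cyc));
	return 0;
}
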
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index e13125058be..166e0d839fa 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
- cpus_setall(idesc->affinity);
+ cpumask_setall(idesc->affinity);
#endif
/* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a58f64ca9f0..7429752ef5a 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -80,7 +80,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for_each_online_cpu(j) {
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
}
#endif
seq_printf(p, " %14s", irq_desc[i].chip->name);
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
if (irq < NR_IRQS) {
- cpumask_copy(&irq_desc[irq].affinity,
+ cpumask_copy(irq_desc[irq].affinity,
cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff);
}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
- if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+ if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
>= nr_cpu_ids) {
/*
* Save it for phase 2 processing
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 28d3d483db9..acc4d19ae62 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -493,14 +493,15 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ int irq = local_vector_to_irq(vector);
+ struct irq_desc *desc = irq_to_desc(irq);
+
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
- kstat_this_cpu.irqs[vector]++;
- } else if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
- else {
- int irq = local_vector_to_irq(vector);
-
+ kstat_incr_irqs_this_cpu(irq, desc);
+ } else if (unlikely(IS_RESCHEDULE(vector))) {
+ kstat_incr_irqs_this_cpu(irq, desc);
+ } else {
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
@@ -543,22 +544,24 @@ void ia64_process_pending_intr(void)
vector = ia64_get_ivr();
- irq_enter();
- saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
- ia64_srlz_d();
+ irq_enter();
+ saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
+ ia64_srlz_d();
/*
* Perform normal interrupt style processing
*/
while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ int irq = local_vector_to_irq(vector);
+ struct irq_desc *desc = irq_to_desc(irq);
+
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
- kstat_this_cpu.irqs[vector]++;
- } else if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
- else {
+ kstat_incr_irqs_this_cpu(irq, desc);
+ } else if (unlikely(IS_RESCHEDULE(vector))) {
+ kstat_incr_irqs_this_cpu(irq, desc);
+ } else {
struct pt_regs *old_regs = set_irq_regs(NULL);
- int irq = local_vector_to_irq(vector);
ia64_setreg(_IA64_REG_CR_TPR, vector);
ia64_srlz_d();
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index f675d8e3385..ec9a5fdfa1b 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -804,7 +804,7 @@ ENTRY(break_fault)
///////////////////////////////////////////////////////////////////////
st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- mov.m r30=ar.itc // M get cycle for accounting
+ MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
#else
mov b6=r30 // I0 setup syscall handler branch reg early
#endif
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 7ccb228ceed..d41a40ef80c 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,5 +1,5 @@
#include <linux/module.h>
-
+#include <linux/dma-mapping.h>
#include <asm/machvec.h>
#include <asm/system.h>
@@ -75,14 +75,16 @@ machvec_timer_interrupt (int irq, void *dev_id)
EXPORT_SYMBOL(machvec_timer_interrupt);
void
-machvec_dma_sync_single (struct device *hwdev, dma_addr_t dma_handle, size_t size, int dir)
+machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
{
mb();
}
EXPORT_SYMBOL(machvec_dma_sync_single);
void
-machvec_dma_sync_sg (struct device *hwdev, struct scatterlist *sg, int n, int dir)
+machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
+ enum dma_data_direction dir)
{
mb();
}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index bab1de2d2f6..8f33a884042 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
ia64_mca_cmc_int_handler(cmc_irq, arg);
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+ cpuid = cpumask_next(cpuid+1, cpu_online_mask);
- if (cpuid < NR_CPUS) {
+ if (cpuid < nr_cpu_ids) {
platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
} else {
/* If no log record, switch out of polling mode */
@@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
ia64_mca_cpe_int_handler(cpe_irq, arg);
- for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+ cpuid = cpumask_next(cpuid+1, cpu_online_mask);
if (cpuid < NR_CPUS) {
platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
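
Both MCA/CPE poller hunks swap an open-coded scan for cpumask_next(). One nuance: the removed loop starts checking at cpuid+1 inclusive, while cpumask_next(n, mask) already returns the first online CPU strictly after n, so passing cpuid+1 appears to begin the scan one CPU later than before; that is harmless for round-robin IPI targeting but worth noting when reading the hunk. A self-contained model of the two scans (the mask and helper are stand-ins, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 64

static uint64_t online_mask = 0x0f;	/* CPUs 0-3 online */

static int cpu_online(int cpu) { return (online_mask >> cpu) & 1; }

/* mimics cpumask_next(n, mask): first online CPU strictly after n */
static int next_online(int n, uint64_t mask)
{
	for (int cpu = n + 1; cpu < NR_CPUS; cpu++)
		if ((mask >> cpu) & 1)
			return cpu;
	return NR_CPUS;			/* exhausted */
}

int main(void)
{
	int cpuid = 1, old;

	for (old = cpuid + 1; old < NR_CPUS && !cpu_online(old); old++)
		;			/* the removed loop */
	printf("old=%d next(cpuid)=%d next(cpuid+1)=%d\n",
	       old, next_online(cpuid, online_mask),
	       next_online(cpuid + 1, online_mask));
	return 0;
}
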
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index aaa7d901521..da3b0cf495a 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -446,6 +446,14 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
mod->arch.opd = s;
else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
mod->arch.unwind = s;
+#ifdef CONFIG_PARAVIRT
+ else if (strcmp(".paravirt_bundles",
+ secstrings + s->sh_name) == 0)
+ mod->arch.paravirt_bundles = s;
+ else if (strcmp(".paravirt_insts",
+ secstrings + s->sh_name) == 0)
+ mod->arch.paravirt_insts = s;
+#endif
if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
printk(KERN_ERR "%s: sections missing\n", mod->name);
@@ -525,8 +533,7 @@ get_ltoff (struct module *mod, uint64_t value, int *okp)
goto found;
/* Not enough GOT entries? */
- if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
- BUG();
+ BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));
e->val = value;
++mod->arch.next_got_entry;
@@ -921,6 +928,30 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind)
register_unwind_table(mod);
+#ifdef CONFIG_PARAVIRT
+ if (mod->arch.paravirt_bundles) {
+ struct paravirt_patch_site_bundle *start =
+ (struct paravirt_patch_site_bundle *)
+ mod->arch.paravirt_bundles->sh_addr;
+ struct paravirt_patch_site_bundle *end =
+ (struct paravirt_patch_site_bundle *)
+ (mod->arch.paravirt_bundles->sh_addr +
+ mod->arch.paravirt_bundles->sh_size);
+
+ paravirt_patch_apply_bundle(start, end);
+ }
+ if (mod->arch.paravirt_insts) {
+ struct paravirt_patch_site_inst *start =
+ (struct paravirt_patch_site_inst *)
+ mod->arch.paravirt_insts->sh_addr;
+ struct paravirt_patch_site_inst *end =
+ (struct paravirt_patch_site_inst *)
+ (mod->arch.paravirt_insts->sh_addr +
+ mod->arch.paravirt_insts->sh_size);
+
+ paravirt_patch_apply_inst(start, end);
+ }
+#endif
return 0;
}
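
module_finalize() now walks two extra ELF sections and hands each to the corresponding patcher; both sections are arrays of fixed-size patch-site records whose bounds come straight from sh_addr/sh_size. The record layouts are not shown in this diff; the sketch below reconstructs them from the field accesses visible in paravirt_patch.c further down (p->sbundle, p->ebundle, p->stag, p->etag, p->type), so treat the exact types as inferred:

#include <stdio.h>

/* Reconstruction from usage, not the kernel's actual definitions. */
struct paravirt_patch_site_bundle {
	void *sbundle;		/* first bundle of the patchable region */
	void *ebundle;		/* end of the region */
	unsigned long type;	/* PARAVIRT_PATCH_TYPE_* selector */
};

struct paravirt_patch_site_inst {
	unsigned long stag;	/* bundle address | slot, per paravirt_get_bundle/slot */
	unsigned long etag;	/* tag bounding the region */
	unsigned long type;
};

int main(void)
{
	printf("%zu %zu\n", sizeof(struct paravirt_patch_site_bundle),
	       sizeof(struct paravirt_patch_site_inst));
	return 0;
}
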
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 89033933903..2b15e233f7f 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -7,44 +7,7 @@
#include <linux/msi.h>
#include <linux/dmar.h>
#include <asm/smp.h>
-
-/*
- * Shifts for APIC-based data
- */
-
-#define MSI_DATA_VECTOR_SHIFT 0
-#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
-#define MSI_DATA_VECTOR_MASK 0xffffff00
-
-#define MSI_DATA_DELIVERY_SHIFT 8
-#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
-#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
-
-#define MSI_DATA_LEVEL_SHIFT 14
-#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
-#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
-
-#define MSI_DATA_TRIGGER_SHIFT 15
-#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
-#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
-
-/*
- * Shift/mask fields for APIC-based bus address
- */
-
-#define MSI_TARGET_CPU_SHIFT 4
-#define MSI_ADDR_HEADER 0xfee00000
-
-#define MSI_ADDR_DESTID_MASK 0xfff0000f
-#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
-
-#define MSI_ADDR_DESTMODE_SHIFT 2
-#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
-#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
-
-#define MSI_ADDR_REDIRECTION_SHIFT 3
-#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+#include <asm/msidef.h>
static struct irq_chip ia64_msi_chip;
@@ -65,8 +28,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
read_msi_msg(irq, &msg);
addr = msg.address_lo;
- addr &= MSI_ADDR_DESTID_MASK;
- addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+ addr &= MSI_ADDR_DEST_ID_MASK;
+ addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
msg.address_lo = addr;
data = msg.data;
@@ -75,7 +38,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
msg.data = data;
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}
#endif /* CONFIG_SMP */
@@ -98,9 +61,9 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
msg.address_hi = 0;
msg.address_lo =
MSI_ADDR_HEADER |
- MSI_ADDR_DESTMODE_PHYS |
+ MSI_ADDR_DEST_MODE_PHYS |
MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DESTID_CPU(dest_phys_id);
+ MSI_ADDR_DEST_ID_CPU(dest_phys_id);
msg.data =
MSI_DATA_TRIGGER_EDGE |
@@ -183,11 +146,11 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
- msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
- msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
- irq_desc[irq].affinity = *mask;
+ cpumask_copy(irq_desc[irq].affinity, mask);
}
#endif /* CONFIG_SMP */
@@ -215,9 +178,9 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
msg->address_hi = 0;
msg->address_lo =
MSI_ADDR_HEADER |
- MSI_ADDR_DESTMODE_PHYS |
+ MSI_ADDR_DEST_MODE_PHYS |
MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DESTID_CPU(dest);
+ MSI_ADDR_DEST_ID_CPU(dest);
msg->data =
MSI_DATA_TRIGGER_EDGE |
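
The MSI hunks replace the file-local layout macros with asm/msidef.h, renaming DESTID to DEST_ID and DESTMODE to DEST_MODE along the way. The deleted block conveniently documents the encoding, which the standalone sketch below reuses to compose one message (assuming msidef.h keeps the same values under the new names; the delivery/level choices are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MSI_TARGET_CPU_SHIFT		4
#define MSI_ADDR_HEADER			0xfee00000u
#define MSI_ADDR_DEST_MODE_PHYS		(0 << 2)
#define MSI_ADDR_REDIRECTION_CPU	(0 << 3)
#define MSI_ADDR_DEST_ID_CPU(cpu)	((cpu) << MSI_TARGET_CPU_SHIFT)

#define MSI_DATA_VECTOR(v)		((uint8_t)(v) << 0)
#define MSI_DATA_DELIVERY_FIXED		(0 << 8)
#define MSI_DATA_LEVEL_ASSERT		(1 << 14)
#define MSI_DATA_TRIGGER_EDGE		(0 << 15)

int main(void)
{
	unsigned int dest_phys_id = 3, vector = 0x31;
	uint32_t address_lo = MSI_ADDR_HEADER |
			      MSI_ADDR_DEST_MODE_PHYS |
			      MSI_ADDR_REDIRECTION_CPU |
			      MSI_ADDR_DEST_ID_CPU(dest_phys_id);
	uint32_t data = MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			MSI_DATA_DELIVERY_FIXED |
			MSI_DATA_VECTOR(vector);

	printf("address_lo=%#x data=%#x\n", address_lo, data);
	return 0;
}
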
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index e5c57f413ca..a4f19c70aad 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -1002,8 +1002,6 @@ create_palinfo_proc_entries(unsigned int cpu)
*pdir = create_proc_read_entry(
palinfo_entries[j].name, 0, cpu_dir,
palinfo_read_entry, (void *)f.value);
- if (*pdir)
- (*pdir)->owner = THIS_MODULE;
pdir++;
}
}
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index 9f14c16f636..a21d7bb9c69 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -46,13 +46,23 @@ struct pv_info pv_info = {
* initialization hooks.
*/
-struct pv_init_ops pv_init_ops;
+static void __init
+ia64_native_patch_branch(unsigned long tag, unsigned long type);
+
+struct pv_init_ops pv_init_ops =
+{
+#ifdef ASM_SUPPORTED
+ .patch_bundle = ia64_native_patch_bundle,
+#endif
+ .patch_branch = ia64_native_patch_branch,
+};
/***************************************************************************
* pv_cpu_ops
* intrinsics hooks.
*/
+#ifndef ASM_SUPPORTED
/* ia64_native_xxx are macros so that we have to make them real functions */
#define DEFINE_VOID_FUNC1(name) \
@@ -60,7 +70,14 @@ struct pv_init_ops pv_init_ops;
ia64_native_ ## name ## _func(unsigned long arg) \
{ \
ia64_native_ ## name(arg); \
- } \
+ }
+
+#define DEFINE_VOID_FUNC1_VOID(name) \
+ static void \
+ ia64_native_ ## name ## _func(void *arg) \
+ { \
+ ia64_native_ ## name(arg); \
+ }
#define DEFINE_VOID_FUNC2(name) \
static void \
@@ -68,7 +85,7 @@ struct pv_init_ops pv_init_ops;
unsigned long arg1) \
{ \
ia64_native_ ## name(arg0, arg1); \
- } \
+ }
#define DEFINE_FUNC0(name) \
static unsigned long \
@@ -84,7 +101,7 @@ struct pv_init_ops pv_init_ops;
return ia64_native_ ## name(arg); \
} \
-DEFINE_VOID_FUNC1(fc);
+DEFINE_VOID_FUNC1_VOID(fc);
DEFINE_VOID_FUNC1(intrin_local_irq_restore);
DEFINE_VOID_FUNC2(ptcga);
@@ -274,6 +291,266 @@ ia64_native_setreg_func(int regnum, unsigned long val)
break;
}
}
+#else
+
+#define __DEFINE_FUNC(name, code) \
+ extern const char ia64_native_ ## name ## _direct_start[]; \
+ extern const char ia64_native_ ## name ## _direct_end[]; \
+ asm (".align 32\n" \
+ ".proc ia64_native_" #name "_func\n" \
+ "ia64_native_" #name "_func:\n" \
+ "ia64_native_" #name "_direct_start:\n" \
+ code \
+ "ia64_native_" #name "_direct_end:\n" \
+ "br.cond.sptk.many b6\n" \
+ ".endp ia64_native_" #name "_func\n")
+
+#define DEFINE_VOID_FUNC0(name, code) \
+ extern void \
+ ia64_native_ ## name ## _func(void); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code) \
+ extern void \
+ ia64_native_ ## name ## _func(unsigned long arg); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1_VOID(name, code) \
+ extern void \
+ ia64_native_ ## name ## _func(void *arg); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code) \
+ extern void \
+ ia64_native_ ## name ## _func(unsigned long arg0, \
+ unsigned long arg1); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC0(name, code) \
+ extern unsigned long \
+ ia64_native_ ## name ## _func(void); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC1(name, type, code) \
+ extern unsigned long \
+ ia64_native_ ## name ## _func(type arg); \
+ __DEFINE_FUNC(name, code)
+
+DEFINE_VOID_FUNC1_VOID(fc,
+ "fc r8\n");
+DEFINE_VOID_FUNC1(intrin_local_irq_restore,
+ ";;\n"
+ " cmp.ne p6, p7 = r8, r0\n"
+ ";;\n"
+ "(p6) ssm psr.i\n"
+ "(p7) rsm psr.i\n"
+ ";;\n"
+ "(p6) srlz.d\n");
+
+DEFINE_VOID_FUNC2(ptcga,
+ "ptc.ga r8, r9\n");
+DEFINE_VOID_FUNC2(set_rr,
+ "mov rr[r8] = r9\n");
+
+/* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */
+DEFINE_FUNC0(get_psr_i,
+ "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n"
+ "mov r8 = psr\n"
+ ";;\n"
+ "and r8 = r2, r8\n");
+
+DEFINE_FUNC1(thash, unsigned long,
+ "thash r8 = r8\n");
+DEFINE_FUNC1(get_cpuid, int,
+ "mov r8 = cpuid[r8]\n");
+DEFINE_FUNC1(get_pmd, int,
+ "mov r8 = pmd[r8]\n");
+DEFINE_FUNC1(get_rr, unsigned long,
+ "mov r8 = rr[r8]\n");
+
+DEFINE_VOID_FUNC0(ssm_i,
+ "ssm psr.i\n");
+DEFINE_VOID_FUNC0(rsm_i,
+ "rsm psr.i\n");
+
+extern void
+ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3,
+ unsigned long val4);
+__DEFINE_FUNC(set_rr0_to_rr4,
+ "mov rr[r0] = r8\n"
+ "movl r2 = 0x2000000000000000\n"
+ ";;\n"
+ "mov rr[r2] = r9\n"
+ "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */
+ ";;\n"
+ "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */
+ "mov rr[r3] = r10\n"
+ ";;\n"
+ "mov rr[r2] = r11\n"
+ "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */
+ ";;\n"
+ "mov rr[r3] = r14\n");
+
+extern unsigned long ia64_native_getreg_func(int regnum);
+asm(".global ia64_native_getreg_func\n");
+#define __DEFINE_GET_REG(id, reg) \
+ "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
+ ";;\n" \
+ "cmp.eq p6, p0 = r2, r8\n" \
+ ";;\n" \
+ "(p6) mov r8 = " #reg "\n" \
+ "(p6) br.cond.sptk.many b6\n" \
+ ";;\n"
+#define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg)
+#define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg)
+
+__DEFINE_FUNC(getreg,
+ __DEFINE_GET_REG(GP, gp)
+ /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */
+ __DEFINE_GET_REG(PSR, psr)
+ __DEFINE_GET_REG(TP, tp)
+ __DEFINE_GET_REG(SP, sp)
+
+ __DEFINE_GET_REG(AR_KR0, ar0)
+ __DEFINE_GET_REG(AR_KR1, ar1)
+ __DEFINE_GET_REG(AR_KR2, ar2)
+ __DEFINE_GET_REG(AR_KR3, ar3)
+ __DEFINE_GET_REG(AR_KR4, ar4)
+ __DEFINE_GET_REG(AR_KR5, ar5)
+ __DEFINE_GET_REG(AR_KR6, ar6)
+ __DEFINE_GET_REG(AR_KR7, ar7)
+ __DEFINE_GET_AR(RSC, rsc)
+ __DEFINE_GET_AR(BSP, bsp)
+ __DEFINE_GET_AR(BSPSTORE, bspstore)
+ __DEFINE_GET_AR(RNAT, rnat)
+ __DEFINE_GET_AR(FCR, fcr)
+ __DEFINE_GET_AR(EFLAG, eflag)
+ __DEFINE_GET_AR(CSD, csd)
+ __DEFINE_GET_AR(SSD, ssd)
+ __DEFINE_GET_REG(AR_CFLAG, ar27)
+ __DEFINE_GET_AR(FSR, fsr)
+ __DEFINE_GET_AR(FIR, fir)
+ __DEFINE_GET_AR(FDR, fdr)
+ __DEFINE_GET_AR(CCV, ccv)
+ __DEFINE_GET_AR(UNAT, unat)
+ __DEFINE_GET_AR(FPSR, fpsr)
+ __DEFINE_GET_AR(ITC, itc)
+ __DEFINE_GET_AR(PFS, pfs)
+ __DEFINE_GET_AR(LC, lc)
+ __DEFINE_GET_AR(EC, ec)
+
+ __DEFINE_GET_CR(DCR, dcr)
+ __DEFINE_GET_CR(ITM, itm)
+ __DEFINE_GET_CR(IVA, iva)
+ __DEFINE_GET_CR(PTA, pta)
+ __DEFINE_GET_CR(IPSR, ipsr)
+ __DEFINE_GET_CR(ISR, isr)
+ __DEFINE_GET_CR(IIP, iip)
+ __DEFINE_GET_CR(IFA, ifa)
+ __DEFINE_GET_CR(ITIR, itir)
+ __DEFINE_GET_CR(IIPA, iipa)
+ __DEFINE_GET_CR(IFS, ifs)
+ __DEFINE_GET_CR(IIM, iim)
+ __DEFINE_GET_CR(IHA, iha)
+ __DEFINE_GET_CR(LID, lid)
+ __DEFINE_GET_CR(IVR, ivr)
+ __DEFINE_GET_CR(TPR, tpr)
+ __DEFINE_GET_CR(EOI, eoi)
+ __DEFINE_GET_CR(IRR0, irr0)
+ __DEFINE_GET_CR(IRR1, irr1)
+ __DEFINE_GET_CR(IRR2, irr2)
+ __DEFINE_GET_CR(IRR3, irr3)
+ __DEFINE_GET_CR(ITV, itv)
+ __DEFINE_GET_CR(PMV, pmv)
+ __DEFINE_GET_CR(CMCV, cmcv)
+ __DEFINE_GET_CR(LRR0, lrr0)
+ __DEFINE_GET_CR(LRR1, lrr1)
+
+ "mov r8 = -1\n" /* unsupported case */
+ );
+
+extern void ia64_native_setreg_func(int regnum, unsigned long val);
+asm(".global ia64_native_setreg_func\n");
+#define __DEFINE_SET_REG(id, reg) \
+ "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
+ ";;\n" \
+ "cmp.eq p6, p0 = r2, r9\n" \
+ ";;\n" \
+ "(p6) mov " #reg " = r8\n" \
+ "(p6) br.cond.sptk.many b6\n" \
+ ";;\n"
+#define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg)
+#define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg)
+__DEFINE_FUNC(setreg,
+ "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n"
+ ";;\n"
+ "cmp.eq p6, p0 = r2, r9\n"
+ ";;\n"
+ "(p6) mov psr.l = r8\n"
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+ ".serialize.data\n"
+#endif
+ "(p6) br.cond.sptk.many b6\n"
+ __DEFINE_SET_REG(GP, gp)
+ __DEFINE_SET_REG(SP, sp)
+
+ __DEFINE_SET_REG(AR_KR0, ar0)
+ __DEFINE_SET_REG(AR_KR1, ar1)
+ __DEFINE_SET_REG(AR_KR2, ar2)
+ __DEFINE_SET_REG(AR_KR3, ar3)
+ __DEFINE_SET_REG(AR_KR4, ar4)
+ __DEFINE_SET_REG(AR_KR5, ar5)
+ __DEFINE_SET_REG(AR_KR6, ar6)
+ __DEFINE_SET_REG(AR_KR7, ar7)
+ __DEFINE_SET_AR(RSC, rsc)
+ __DEFINE_SET_AR(BSP, bsp)
+ __DEFINE_SET_AR(BSPSTORE, bspstore)
+ __DEFINE_SET_AR(RNAT, rnat)
+ __DEFINE_SET_AR(FCR, fcr)
+ __DEFINE_SET_AR(EFLAG, eflag)
+ __DEFINE_SET_AR(CSD, csd)
+ __DEFINE_SET_AR(SSD, ssd)
+ __DEFINE_SET_REG(AR_CFLAG, ar27)
+ __DEFINE_SET_AR(FSR, fsr)
+ __DEFINE_SET_AR(FIR, fir)
+ __DEFINE_SET_AR(FDR, fdr)
+ __DEFINE_SET_AR(CCV, ccv)
+ __DEFINE_SET_AR(UNAT, unat)
+ __DEFINE_SET_AR(FPSR, fpsr)
+ __DEFINE_SET_AR(ITC, itc)
+ __DEFINE_SET_AR(PFS, pfs)
+ __DEFINE_SET_AR(LC, lc)
+ __DEFINE_SET_AR(EC, ec)
+
+ __DEFINE_SET_CR(DCR, dcr)
+ __DEFINE_SET_CR(ITM, itm)
+ __DEFINE_SET_CR(IVA, iva)
+ __DEFINE_SET_CR(PTA, pta)
+ __DEFINE_SET_CR(IPSR, ipsr)
+ __DEFINE_SET_CR(ISR, isr)
+ __DEFINE_SET_CR(IIP, iip)
+ __DEFINE_SET_CR(IFA, ifa)
+ __DEFINE_SET_CR(ITIR, itir)
+ __DEFINE_SET_CR(IIPA, iipa)
+ __DEFINE_SET_CR(IFS, ifs)
+ __DEFINE_SET_CR(IIM, iim)
+ __DEFINE_SET_CR(IHA, iha)
+ __DEFINE_SET_CR(LID, lid)
+ __DEFINE_SET_CR(IVR, ivr)
+ __DEFINE_SET_CR(TPR, tpr)
+ __DEFINE_SET_CR(EOI, eoi)
+ __DEFINE_SET_CR(IRR0, irr0)
+ __DEFINE_SET_CR(IRR1, irr1)
+ __DEFINE_SET_CR(IRR2, irr2)
+ __DEFINE_SET_CR(IRR3, irr3)
+ __DEFINE_SET_CR(ITV, itv)
+ __DEFINE_SET_CR(PMV, pmv)
+ __DEFINE_SET_CR(CMCV, cmcv)
+ __DEFINE_SET_CR(LRR0, lrr0)
+ __DEFINE_SET_CR(LRR1, lrr1)
+ );
+#endif
struct pv_cpu_ops pv_cpu_ops = {
.fc = ia64_native_fc_func,
@@ -366,4 +643,258 @@ ia64_native_do_steal_accounting(unsigned long *new_itm)
struct pv_time_ops pv_time_ops = {
.do_steal_accounting = ia64_native_do_steal_accounting,
+ .sched_clock = ia64_native_sched_clock,
+};
+
+/***************************************************************************
+ * binary patching
+ * pv_init_ops.patch_bundle
+ */
+
+#ifdef ASM_SUPPORTED
+#define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \
+ __DEFINE_FUNC(get_ ## name, \
+ ";;\n" \
+ "mov r8 = " #reg "\n" \
+ ";;\n")
+
+#define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
+ __DEFINE_FUNC(set_ ## name, \
+ ";;\n" \
+ "mov " #reg " = r8\n" \
+ ";;\n")
+
+#define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \
+ IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \
+ IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
+
+#define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \
+ IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg)
+
+#define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \
+ IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg)
+
+
+IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr);
+IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp);
+
+/* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */
+__DEFINE_FUNC(set_psr_l,
+ ";;\n"
+ "mov psr.l = r8\n"
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+ ".serialize.data\n"
+#endif
+ ";;\n");
+
+IA64_NATIVE_PATCH_DEFINE_REG(gp, gp);
+IA64_NATIVE_PATCH_DEFINE_REG(sp, sp);
+
+IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0);
+IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1);
+IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2);
+IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3);
+IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4);
+IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5);
+IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6);
+IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7);
+
+IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc);
+IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp);
+IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore);
+IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat);
+IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr);
+IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag);
+IA64_NATIVE_PATCH_DEFINE_AR(csd, csd);
+IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd);
+IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27);
+IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr);
+IA64_NATIVE_PATCH_DEFINE_AR(fir, fir);
+IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr);
+IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv);
+IA64_NATIVE_PATCH_DEFINE_AR(unat, unat);
+IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr);
+IA64_NATIVE_PATCH_DEFINE_AR(itc, itc);
+IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs);
+IA64_NATIVE_PATCH_DEFINE_AR(lc, lc);
+IA64_NATIVE_PATCH_DEFINE_AR(ec, ec);
+
+IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr);
+IA64_NATIVE_PATCH_DEFINE_CR(itm, itm);
+IA64_NATIVE_PATCH_DEFINE_CR(iva, iva);
+IA64_NATIVE_PATCH_DEFINE_CR(pta, pta);
+IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr);
+IA64_NATIVE_PATCH_DEFINE_CR(isr, isr);
+IA64_NATIVE_PATCH_DEFINE_CR(iip, iip);
+IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa);
+IA64_NATIVE_PATCH_DEFINE_CR(itir, itir);
+IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa);
+IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs);
+IA64_NATIVE_PATCH_DEFINE_CR(iim, iim);
+IA64_NATIVE_PATCH_DEFINE_CR(iha, iha);
+IA64_NATIVE_PATCH_DEFINE_CR(lid, lid);
+IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr);
+IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr);
+IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi);
+IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0);
+IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1);
+IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2);
+IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3);
+IA64_NATIVE_PATCH_DEFINE_CR(itv, itv);
+IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv);
+IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv);
+IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0);
+IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1);
+
+static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[]
+__initdata_or_module =
+{
+#define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \
+ { \
+ (void*)ia64_native_ ## name ## _direct_start, \
+ (void*)ia64_native_ ## name ## _direct_end, \
+ PARAVIRT_PATCH_TYPE_ ## type, \
+ }
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore,
+ INTRIN_LOCAL_IRQ_RESTORE),
+
+#define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
+ { \
+ (void*)ia64_native_get_ ## name ## _direct_start, \
+ (void*)ia64_native_get_ ## name ## _direct_end, \
+ PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
+ }
+
+#define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
+ { \
+ (void*)ia64_native_set_ ## name ## _direct_start, \
+ (void*)ia64_native_set_ ## name ## _direct_end, \
+ PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
+ }
+
+#define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
+
+#define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg)
+
+#define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg)
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP),
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L),
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP),
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7),
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC),
+
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0),
+ IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1),
};
+
+unsigned long __init_or_module
+ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
+{
+ const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) /
+ sizeof(ia64_native_patch_bundle_elems[0]);
+
+ return __paravirt_patch_apply_bundle(sbundle, ebundle, type,
+ ia64_native_patch_bundle_elems,
+ nelems, NULL);
+}
+#endif /* ASM_SUPPORTED */
+
+extern const char ia64_native_switch_to[];
+extern const char ia64_native_leave_syscall[];
+extern const char ia64_native_work_processed_syscall[];
+extern const char ia64_native_leave_kernel[];
+
+const struct paravirt_patch_branch_target ia64_native_branch_target[]
+__initconst = {
+#define PARAVIRT_BR_TARGET(name, type) \
+ { \
+ ia64_native_ ## name, \
+ PARAVIRT_PATCH_TYPE_BR_ ## type, \
+ }
+ PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
+ PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
+ PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
+ PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
+};
+
+static void __init
+ia64_native_patch_branch(unsigned long tag, unsigned long type)
+{
+ const unsigned long nelem =
+ sizeof(ia64_native_branch_target) /
+ sizeof(ia64_native_branch_target[0]);
+ __paravirt_patch_apply_branch(tag, type,
+ ia64_native_branch_target, nelem);
+}
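
paravirt.c now carries two flavors of the native hooks: without ASM_SUPPORTED, plain C wrappers around the intrinsics; with it, hand-written stubs whose _direct_start/_direct_end labels let ia64_native_patch_bundle() copy the native instructions straight over a call site. Until such patching happens, every caller simply indirects through the ops structure. A toy model of that unpatched path (the hook name is taken from the definitions above, everything else is schematic):

#include <stdio.h>

struct pv_cpu_ops {
	unsigned long (*get_psr_i)(void);
};

static unsigned long native_get_psr_i(void)
{
	return 1;			/* pretend PSR.i is set */
}

static struct pv_cpu_ops pv_cpu_ops = {
	.get_psr_i = native_get_psr_i,	/* a hypervisor would override this */
};

int main(void)
{
	printf("psr.i = %lu\n", pv_cpu_ops.get_psr_i());
	return 0;
}
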
diff --git a/arch/ia64/kernel/paravirt_patch.c b/arch/ia64/kernel/paravirt_patch.c
new file mode 100644
index 00000000000..bfdfef1b1ff
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_patch.c
@@ -0,0 +1,514 @@
+/******************************************************************************
+ * linux/arch/ia64/kernel/paravirt_patch.c
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <asm/intrinsics.h>
+#include <asm/kprobes.h>
+#include <asm/paravirt.h>
+#include <asm/paravirt_patch.h>
+
+typedef union ia64_inst {
+ struct {
+ unsigned long long qp : 6;
+ unsigned long long : 31;
+ unsigned long long opcode : 4;
+ unsigned long long reserved : 23;
+ } generic;
+ unsigned long long l;
+} ia64_inst_t;
+
+/*
+ * flush_icache_range() can't be used here: we run before
+ * cpu_init(), which initializes ia64_i_cache_stride_shift,
+ * and flush_icache_range() relies on it.
+ */
+void __init_or_module
+paravirt_flush_i_cache_range(const void *instr, unsigned long size)
+{
+ extern void paravirt_fc_i(const void *addr);
+ unsigned long i;
+
+ for (i = 0; i < size; i += sizeof(bundle_t))
+ paravirt_fc_i(instr + i);
+}
+
+bundle_t* __init_or_module
+paravirt_get_bundle(unsigned long tag)
+{
+ return (bundle_t *)(tag & ~3UL);
+}
+
+unsigned long __init_or_module
+paravirt_get_slot(unsigned long tag)
+{
+ return tag & 3UL;
+}
+
+unsigned long __init_or_module
+paravirt_get_num_inst(unsigned long stag, unsigned long etag)
+{
+ bundle_t *sbundle = paravirt_get_bundle(stag);
+ unsigned long sslot = paravirt_get_slot(stag);
+ bundle_t *ebundle = paravirt_get_bundle(etag);
+ unsigned long eslot = paravirt_get_slot(etag);
+
+ return (ebundle - sbundle) * 3 + eslot - sslot + 1;
+}
+
+unsigned long __init_or_module
+paravirt_get_next_tag(unsigned long tag)
+{
+ unsigned long slot = paravirt_get_slot(tag);
+
+ switch (slot) {
+ case 0:
+ case 1:
+ return tag + 1;
+ case 2: {
+ bundle_t *bundle = paravirt_get_bundle(tag);
+ return (unsigned long)(bundle + 1);
+ }
+ default:
+ BUG();
+ }
+ /* NOTREACHED */
+}
+
+ia64_inst_t __init_or_module
+paravirt_read_slot0(const bundle_t *bundle)
+{
+ ia64_inst_t inst;
+ inst.l = bundle->quad0.slot0;
+ return inst;
+}
+
+ia64_inst_t __init_or_module
+paravirt_read_slot1(const bundle_t *bundle)
+{
+ ia64_inst_t inst;
+ inst.l = bundle->quad0.slot1_p0 |
+ ((unsigned long long)bundle->quad1.slot1_p1 << 18UL);
+ return inst;
+}
+
+ia64_inst_t __init_or_module
+paravirt_read_slot2(const bundle_t *bundle)
+{
+ ia64_inst_t inst;
+ inst.l = bundle->quad1.slot2;
+ return inst;
+}
+
+ia64_inst_t __init_or_module
+paravirt_read_inst(unsigned long tag)
+{
+ bundle_t *bundle = paravirt_get_bundle(tag);
+ unsigned long slot = paravirt_get_slot(tag);
+
+ switch (slot) {
+ case 0:
+ return paravirt_read_slot0(bundle);
+ case 1:
+ return paravirt_read_slot1(bundle);
+ case 2:
+ return paravirt_read_slot2(bundle);
+ default:
+ BUG();
+ }
+ /* NOTREACHED */
+}
+
+void __init_or_module
+paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst)
+{
+ bundle->quad0.slot0 = inst.l;
+}
+
+void __init_or_module
+paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst)
+{
+ bundle->quad0.slot1_p0 = inst.l;
+ bundle->quad1.slot1_p1 = inst.l >> 18UL;
+}
+
+void __init_or_module
+paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst)
+{
+ bundle->quad1.slot2 = inst.l;
+}
+
+void __init_or_module
+paravirt_write_inst(unsigned long tag, ia64_inst_t inst)
+{
+ bundle_t *bundle = paravirt_get_bundle(tag);
+ unsigned long slot = paravirt_get_slot(tag);
+
+ switch (slot) {
+ case 0:
+ paravirt_write_slot0(bundle, inst);
+ break;
+ case 1:
+ paravirt_write_slot1(bundle, inst);
+ break;
+ case 2:
+ paravirt_write_slot2(bundle, inst);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ paravirt_flush_i_cache_range(bundle, sizeof(*bundle));
+}
+
+/* for debug */
+void
+paravirt_print_bundle(const bundle_t *bundle)
+{
+ const unsigned long *quad = (const unsigned long *)bundle;
+ ia64_inst_t slot0 = paravirt_read_slot0(bundle);
+ ia64_inst_t slot1 = paravirt_read_slot1(bundle);
+ ia64_inst_t slot2 = paravirt_read_slot2(bundle);
+
+ printk(KERN_DEBUG
+ "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]);
+ printk(KERN_DEBUG
+ "bundle template 0x%x\n",
+ bundle->quad0.template);
+ printk(KERN_DEBUG
+ "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n",
+ (unsigned long)bundle->quad0.slot0,
+ (unsigned long)bundle->quad0.slot1_p0,
+ (unsigned long)bundle->quad1.slot1_p1,
+ (unsigned long)bundle->quad1.slot2);
+ printk(KERN_DEBUG
+ "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n",
+ slot0.l, slot1.l, slot2.l);
+}
+
+static int noreplace_paravirt __init_or_module = 0;
+
+static int __init setup_noreplace_paravirt(char *str)
+{
+ noreplace_paravirt = 1;
+ return 1;
+}
+__setup("noreplace-paravirt", setup_noreplace_paravirt);
+
+#ifdef ASM_SUPPORTED
+static void __init_or_module
+fill_nop_bundle(void *sbundle, void *ebundle)
+{
+ extern const char paravirt_nop_bundle[];
+ extern const unsigned long paravirt_nop_bundle_size;
+
+ void *bundle = sbundle;
+
+ BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
+ BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
+
+ while (bundle < ebundle) {
+ memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size);
+
+ bundle += paravirt_nop_bundle_size;
+ }
+}
+
+/* helper function */
+unsigned long __init_or_module
+__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
+ const struct paravirt_patch_bundle_elem *elems,
+ unsigned long nelems,
+ const struct paravirt_patch_bundle_elem **found)
+{
+ unsigned long used = 0;
+ unsigned long i;
+
+ BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
+ BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
+
+	/* clear the caller's slot, not the local pointer */
+	if (found != NULL)
+		*found = NULL;
+ for (i = 0; i < nelems; i++) {
+ const struct paravirt_patch_bundle_elem *p = &elems[i];
+ if (p->type == type) {
+ unsigned long need = p->ebundle - p->sbundle;
+ unsigned long room = ebundle - sbundle;
+
+ if (found != NULL)
+ *found = p;
+
+ if (room < need) {
+				/* not enough room to replace; skip it */
+				printk(KERN_DEBUG
+				       "not enough room to patch in bundles: "
+				       "type %ld need %ld room %ld\n",
+				       type, need, room);
+ break;
+ }
+
+ used = need;
+ memcpy(sbundle, p->sbundle, used);
+ break;
+ }
+ }
+
+ return used;
+}
+
+void __init_or_module
+paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
+ const struct paravirt_patch_site_bundle *end)
+{
+ const struct paravirt_patch_site_bundle *p;
+
+ if (noreplace_paravirt)
+ return;
+ if (pv_init_ops.patch_bundle == NULL)
+ return;
+
+ for (p = start; p < end; p++) {
+ unsigned long used;
+
+ used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle,
+ p->type);
+ if (used == 0)
+ continue;
+
+ fill_nop_bundle(p->sbundle + used, p->ebundle);
+ paravirt_flush_i_cache_range(p->sbundle,
+ p->ebundle - p->sbundle);
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+/*
+ * nop.i, nop.m and nop.f instructions share the same format,
+ * but nop.b has a different format.
+ * nop.b is not supported for now.
+ */
+static void __init_or_module
+fill_nop_inst(unsigned long stag, unsigned long etag)
+{
+ extern const bundle_t paravirt_nop_mfi_inst_bundle[];
+ unsigned long tag;
+ const ia64_inst_t nop_inst =
+ paravirt_read_slot0(paravirt_nop_mfi_inst_bundle);
+
+ for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag))
+ paravirt_write_inst(tag, nop_inst);
+}
+
+void __init_or_module
+paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
+ const struct paravirt_patch_site_inst *end)
+{
+ const struct paravirt_patch_site_inst *p;
+
+ if (noreplace_paravirt)
+ return;
+ if (pv_init_ops.patch_inst == NULL)
+ return;
+
+ for (p = start; p < end; p++) {
+ unsigned long tag;
+ bundle_t *sbundle;
+ bundle_t *ebundle;
+
+ tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type);
+ if (tag == p->stag)
+ continue;
+
+ fill_nop_inst(tag, p->etag);
+ sbundle = paravirt_get_bundle(p->stag);
+ ebundle = paravirt_get_bundle(p->etag) + 1;
+ paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) *
+ sizeof(bundle_t));
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+#endif /* ASM_SUPPORTED */
+
+/* brl.cond.sptk.many <target64> X3 */
+typedef union inst_x3_op {
+ ia64_inst_t inst;
+ struct {
+ unsigned long qp: 6;
+ unsigned long btyp: 3;
+ unsigned long unused: 3;
+ unsigned long p: 1;
+ unsigned long imm20b: 20;
+ unsigned long wh: 2;
+ unsigned long d: 1;
+ unsigned long i: 1;
+ unsigned long opcode: 4;
+ };
+ unsigned long l;
+} inst_x3_op_t;
+
+typedef union inst_x3_imm {
+ ia64_inst_t inst;
+ struct {
+ unsigned long unused: 2;
+ unsigned long imm39: 39;
+ };
+ unsigned long l;
+} inst_x3_imm_t;
+
+void __init_or_module
+paravirt_patch_reloc_brl(unsigned long tag, const void *target)
+{
+ unsigned long tag_op = paravirt_get_next_tag(tag);
+ unsigned long tag_imm = tag;
+ bundle_t *bundle = paravirt_get_bundle(tag);
+
+ ia64_inst_t inst_op = paravirt_read_inst(tag_op);
+ ia64_inst_t inst_imm = paravirt_read_inst(tag_imm);
+
+ inst_x3_op_t inst_x3_op = { .l = inst_op.l };
+ inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l };
+
+ unsigned long imm60 =
+ ((unsigned long)target - (unsigned long)bundle) >> 4;
+
+ BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */
+ BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
+
+ /* imm60[59] 1bit */
+ inst_x3_op.i = (imm60 >> 59) & 1;
+ /* imm60[19:0] 20bit */
+ inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1);
+ /* imm60[58:20] 39bit */
+ inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1);
+
+ inst_op.l = inst_x3_op.l;
+ inst_imm.l = inst_x3_imm.l;
+
+ paravirt_write_inst(tag_op, inst_op);
+ paravirt_write_inst(tag_imm, inst_imm);
+}
+
+/* br.cond.sptk.many <target25> B1 */
+typedef union inst_b1 {
+ ia64_inst_t inst;
+ struct {
+ unsigned long qp: 6;
+ unsigned long btype: 3;
+ unsigned long unused: 3;
+ unsigned long p: 1;
+ unsigned long imm20b: 20;
+ unsigned long wh: 2;
+ unsigned long d: 1;
+ unsigned long s: 1;
+ unsigned long opcode: 4;
+ };
+ unsigned long l;
+} inst_b1_t;
+
+void __init
+paravirt_patch_reloc_br(unsigned long tag, const void *target)
+{
+ bundle_t *bundle = paravirt_get_bundle(tag);
+ ia64_inst_t inst = paravirt_read_inst(tag);
+ unsigned long target25 = (unsigned long)target - (unsigned long)bundle;
+ inst_b1_t inst_b1;
+
+ BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
+
+ inst_b1.l = inst.l;
+ if (target25 & (1UL << 63))
+ inst_b1.s = 1;
+ else
+ inst_b1.s = 0;
+
+ inst_b1.imm20b = target25 >> 4;
+ inst.l = inst_b1.l;
+
+ paravirt_write_inst(tag, inst);
+}
+
+void __init
+__paravirt_patch_apply_branch(
+ unsigned long tag, unsigned long type,
+ const struct paravirt_patch_branch_target *entries,
+ unsigned int nr_entries)
+{
+ unsigned int i;
+ for (i = 0; i < nr_entries; i++) {
+ if (entries[i].type == type) {
+ paravirt_patch_reloc_br(tag, entries[i].entry);
+ break;
+ }
+ }
+}
+
+static void __init
+paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start,
+ const struct paravirt_patch_site_branch *end)
+{
+ const struct paravirt_patch_site_branch *p;
+
+ if (noreplace_paravirt)
+ return;
+ if (pv_init_ops.patch_branch == NULL)
+ return;
+
+ for (p = start; p < end; p++)
+ (*pv_init_ops.patch_branch)(p->tag, p->type);
+
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+void __init
+paravirt_patch_apply(void)
+{
+ extern const char __start_paravirt_bundles[];
+ extern const char __stop_paravirt_bundles[];
+ extern const char __start_paravirt_insts[];
+ extern const char __stop_paravirt_insts[];
+ extern const char __start_paravirt_branches[];
+ extern const char __stop_paravirt_branches[];
+
+ paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *)
+ __start_paravirt_bundles,
+ (const struct paravirt_patch_site_bundle *)
+ __stop_paravirt_bundles);
+ paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *)
+ __start_paravirt_insts,
+ (const struct paravirt_patch_site_inst *)
+ __stop_paravirt_insts);
+ paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *)
+ __start_paravirt_branches,
+ (const struct paravirt_patch_site_branch *)
+ __stop_paravirt_branches);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
+ * End:
+ */
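
Most of paravirt_patch.c is mechanical bundle/slot plumbing; the interesting bit is paravirt_patch_reloc_brl(), which scatters a 60-bit bundle-relative displacement across the i, imm39 and imm20b fields of an MLX-form brl. The standalone sketch below redoes that split and reassembles it to check the round trip (the addresses are arbitrary 16-byte-aligned values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t target = 0x123456789abcd0ULL;	/* arbitrary, 16-byte aligned */
	uint64_t bundle = 0x1000ULL;
	uint64_t imm60  = (target - bundle) >> 4;

	uint64_t i      = (imm60 >> 59) & 1;			/* imm60[59] */
	uint64_t imm20b = imm60 & ((1ULL << 20) - 1);		/* imm60[19:0] */
	uint64_t imm39  = (imm60 >> 20) & ((1ULL << 39) - 1);	/* imm60[58:20] */

	uint64_t back = (i << 59) | (imm39 << 20) | imm20b;
	printf("imm60=%#llx reassembled=%#llx match=%d\n",
	       (unsigned long long)imm60, (unsigned long long)back,
	       imm60 == back);
	return 0;
}
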
diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c
new file mode 100644
index 00000000000..b28082a95d4
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_patchlist.c
@@ -0,0 +1,79 @@
+/******************************************************************************
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/bug.h>
+#include <asm/paravirt.h>
+
+#define DECLARE(name) \
+ extern unsigned long \
+ __ia64_native_start_gate_##name##_patchlist[]; \
+ extern unsigned long \
+ __ia64_native_end_gate_##name##_patchlist[]
+
+DECLARE(fsyscall);
+DECLARE(brl_fsys_bubble_down);
+DECLARE(vtop);
+DECLARE(mckinley_e9);
+
+extern unsigned long __start_gate_section[];
+
+#define ASSIGN(name) \
+ .start_##name##_patchlist = \
+ (unsigned long)__ia64_native_start_gate_##name##_patchlist, \
+ .end_##name##_patchlist = \
+ (unsigned long)__ia64_native_end_gate_##name##_patchlist
+
+struct pv_patchdata pv_patchdata __initdata = {
+ ASSIGN(fsyscall),
+ ASSIGN(brl_fsys_bubble_down),
+ ASSIGN(vtop),
+ ASSIGN(mckinley_e9),
+
+ .gate_section = (void*)__start_gate_section,
+};
+
+
+unsigned long __init
+paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
+{
+
+#define CASE(NAME, name) \
+ case PV_GATE_START_##NAME: \
+ return pv_patchdata.start_##name##_patchlist; \
+ case PV_GATE_END_##NAME: \
+ return pv_patchdata.end_##name##_patchlist; \
+
+ switch (type) {
+ CASE(FSYSCALL, fsyscall);
+ CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
+ CASE(VTOP, vtop);
+ CASE(MCKINLEY_E9, mckinley_e9);
+ default:
+ BUG();
+ break;
+ }
+ return 0;
+}
+
+void * __init
+paravirt_get_gate_section(void)
+{
+ return pv_patchdata.gate_section;
+}
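
paravirt_patchlist.c is one DECLARE/ASSIGN/CASE macro fan-out: a single logical name (fsyscall, vtop, ...) expands to a symbol pair, a pair of struct fields, and a pair of switch arms. A minimal standalone model of the pattern, with made-up names and values:

#include <stdio.h>

struct patchdata { unsigned long start_foo, end_foo; };

#define ASSIGN(name) \
	.start_##name = 0x100, .end_##name = 0x200

static struct patchdata pd = { ASSIGN(foo) };

enum which { START_FOO, END_FOO };

static unsigned long get(enum which type)
{
#define CASE(NAME, name)				\
	case START_##NAME: return pd.start_##name;	\
	case END_##NAME: return pd.end_##name

	switch (type) {
	CASE(FOO, foo);
	}
	return 0;
}

int main(void)
{
	printf("%#lx %#lx\n", get(START_FOO), get(END_FOO));
	return 0;
}
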
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h
new file mode 100644
index 00000000000..0684aa6c650
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_patchlist.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+ * linux/arch/ia64/kernel/paravirt_patchlist.h
+ *
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
+#include <asm/xen/patchlist.h>
+#else
+#include <asm/native/patchlist.h>
+#endif
+
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 2f42fcb9776..6158560d7f1 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -20,8 +20,11 @@
*
*/
+#include <linux/init.h>
#include <asm/asmmacro.h>
#include <asm/asm-offsets.h>
+#include <asm/paravirt_privop.h>
+#include <asm/paravirt_patch.h>
#include "entry.h"
#define DATA8(sym, init_value) \
@@ -32,29 +35,87 @@
data8 init_value ; \
.popsection
-#define BRANCH(targ, reg, breg) \
- movl reg=targ ; \
- ;; \
- ld8 reg=[reg] ; \
- ;; \
- mov breg=reg ; \
+#define BRANCH(targ, reg, breg, type) \
+ PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \
+ ;; \
+ movl reg=targ ; \
+ ;; \
+ ld8 reg=[reg] ; \
+ ;; \
+ mov breg=reg ; \
br.cond.sptk.many breg
-#define BRANCH_PROC(sym, reg, breg) \
- DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
- GLOBAL_ENTRY(paravirt_ ## sym) ; \
- BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
+#define BRANCH_PROC(sym, reg, breg, type) \
+ DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
+ GLOBAL_ENTRY(paravirt_ ## sym) ; \
+ BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym)
-#define BRANCH_PROC_UNWINFO(sym, reg, breg) \
- DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
- GLOBAL_ENTRY(paravirt_ ## sym) ; \
- PT_REGS_UNWIND_INFO(0) ; \
- BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \
+#define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \
+ DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
+ GLOBAL_ENTRY(paravirt_ ## sym) ; \
+ PT_REGS_UNWIND_INFO(0) ; \
+ BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym)
-BRANCH_PROC(switch_to, r22, b7)
-BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
-BRANCH_PROC(work_processed_syscall, r2, b7)
-BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
+BRANCH_PROC(switch_to, r22, b7, SWITCH_TO)
+BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL)
+BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL)
+BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL)
+
+
+#ifdef CONFIG_MODULES
+#define __INIT_OR_MODULE .text
+#define __INITDATA_OR_MODULE .data
+#else
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#endif /* CONFIG_MODULES */
+
+ __INIT_OR_MODULE
+ GLOBAL_ENTRY(paravirt_fc_i)
+ fc.i r32
+ br.ret.sptk.many rp
+ END(paravirt_fc_i)
+ __FINIT
+
+ __INIT_OR_MODULE
+ .align 32
+ GLOBAL_ENTRY(paravirt_nop_b_inst_bundle)
+ {
+ nop.b 0
+ nop.b 0
+ nop.b 0
+ }
+ END(paravirt_nop_b_inst_bundle)
+ __FINIT
+
+	/* NOTE: nop.[mfi] share the same format */
+ __INIT_OR_MODULE
+ GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle)
+ {
+ nop.m 0
+ nop.f 0
+ nop.i 0
+ }
+ END(paravirt_nop_mfi_inst_bundle)
+ __FINIT
+
+ __INIT_OR_MODULE
+ GLOBAL_ENTRY(paravirt_nop_bundle)
+paravirt_nop_bundle_start:
+ {
+ nop 0
+ nop 0
+ nop 0
+ }
+paravirt_nop_bundle_end:
+ END(paravirt_nop_bundle)
+ __FINIT
+
+ __INITDATA_OR_MODULE
+ .align 8
+ .global paravirt_nop_bundle_size
+paravirt_nop_bundle_size:
+ data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index b83b2c51600..68a1311db80 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/string.h>
+#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sections.h>
@@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
ia64_srlz_i();
}
+extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
+extern char ia64_native_fsys_bubble_down[];
+struct pv_fsys_data pv_fsys_data __initdata = {
+ .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
+ .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
+};
+
+unsigned long * __init
+paravirt_get_fsyscall_table(void)
+{
+ return pv_fsys_data.fsyscall_table;
+}
+
+char * __init
+paravirt_get_fsys_bubble_down(void)
+{
+ return pv_fsys_data.fsys_bubble_down;
+}
+
static void __init
patch_fsyscall_table (unsigned long start, unsigned long end)
{
- extern unsigned long fsyscall_table[NR_syscalls];
+ u64 fsyscall_table = (u64)paravirt_get_fsyscall_table();
s32 *offp = (s32 *) start;
u64 ip;
while (offp < (s32 *) end) {
ip = (u64) ia64_imva((char *) offp + *offp);
- ia64_patch_imm64(ip, (u64) fsyscall_table);
+ ia64_patch_imm64(ip, fsyscall_table);
ia64_fc((void *) ip);
++offp;
}
@@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{
- extern char fsys_bubble_down[];
+ u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down();
s32 *offp = (s32 *) start;
u64 ip;
@@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
void __init
ia64_patch_gate (void)
{
-# define START(name) ((unsigned long) __start_gate_##name##_patchlist)
-# define END(name) ((unsigned long)__end_gate_##name##_patchlist)
+# define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name)
+# define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name)
- patch_fsyscall_table(START(fsyscall), END(fsyscall));
- patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
- ia64_patch_vtop(START(vtop), END(vtop));
- ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
+ patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL));
+ patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN));
+ ia64_patch_vtop(START(VTOP), END(VTOP));
+ ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9));
}
void ia64_patch_phys_stack_reg(unsigned long val)
@@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val)
while (offp < end) {
ip = (u64) offp + *offp;
ia64_patch(ip, mask, imm);
- ia64_fc(ip);
+ ia64_fc((void *)ip);
++offp;
}
ia64_sync_i();
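Routing the fsyscall table and bubble-down entry through pv_fsys_data means a paravirtualized platform can substitute its own gate entry points before ia64_patch_gate() runs. A hedged sketch of such an override; the xen_* symbol names are hypothetical and not taken from this patch:

    /* Hypothetical backend override; the xen_* symbols are illustrative. */
    extern unsigned long xen_fsyscall_table[];   /* hypothetical */
    extern char xen_fsys_bubble_down[];          /* hypothetical */

    static void __init xen_setup_fsys_data(void)
    {
        pv_fsys_data.fsyscall_table = xen_fsyscall_table;
        pv_fsys_data.fsys_bubble_down = xen_fsys_bubble_down;
    }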
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index d0ada067a4a..e4cb443bb98 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,9 +32,6 @@ int force_iommu __read_mostly = 1;
int force_iommu __read_mostly;
#endif
-/* Set this to 1 if there is a HW IOMMU in the system */
-int iommu_detected __read_mostly;
-
 /* Dummy device used for NULL arguments (normally ISA). A smaller DMA
    mask would probably be better, but this is bug-to-bug compatible
    with i386. */
@@ -44,18 +41,7 @@ struct device fallback_dev = {
.dma_mask = &fallback_dev.coherent_dma_mask,
};
-void __init pci_iommu_alloc(void)
-{
- /*
- * The order of these functions is important for
- * fall-back/fail-over reasons
- */
- detect_intel_iommu();
-
-#ifdef CONFIG_SWIOTLB
- pci_swiotlb_init();
-#endif
-}
+extern struct dma_map_ops intel_dma_ops;
static int __init pci_iommu_init(void)
{
@@ -79,15 +65,12 @@ iommu_dma_init(void)
return;
}
-struct dma_mapping_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
int iommu_dma_supported(struct device *dev, u64 mask)
{
- struct dma_mapping_ops *ops = get_dma_ops(dev);
+ struct dma_map_ops *ops = platform_dma_get_ops(dev);
- if (ops->dma_supported_op)
- return ops->dma_supported_op(dev, mask);
+ if (ops->dma_supported)
+ return ops->dma_supported(dev, mask);
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
@@ -116,4 +99,25 @@ int iommu_dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(iommu_dma_supported);
+void __init pci_iommu_alloc(void)
+{
+ dma_ops = &intel_dma_ops;
+
+ dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+ dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+ dma_ops->sync_single_for_device = machvec_dma_sync_single;
+ dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+ dma_ops->dma_supported = iommu_dma_supported;
+
+ /*
+ * The order of these functions is important for
+ * fall-back/fail-over reasons
+ */
+ detect_intel_iommu();
+
+#ifdef CONFIG_SWIOTLB
+ pci_swiotlb_init();
+#endif
+}
+
#endif
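With the old ia64-private dma_mapping_ops removed, every mapping call now dispatches through the generic struct dma_map_ops selected per device. An illustrative sketch of the dispatch shape (not the exact inline from asm/dma-mapping.h):

    /* Illustrative only: per-device ops dispatch after this change. */
    static inline dma_addr_t
    example_map_page(struct device *dev, struct page *page,
                     unsigned long offset, size_t size,
                     enum dma_data_direction dir)
    {
        struct dma_map_ops *ops = platform_dma_get_ops(dev);

        return ops->map_page(dev, page, offset, size, dir, NULL);
    }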
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 16c50516dbc..573f02c39a0 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -13,23 +13,37 @@
int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);
-struct dma_mapping_ops swiotlb_dma_ops = {
- .mapping_error = swiotlb_dma_mapping_error,
- .alloc_coherent = swiotlb_alloc_coherent,
+static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ if (dev->coherent_dma_mask != DMA_64BIT_MASK)
+ gfp |= GFP_DMA;
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+}
+
+struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = ia64_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
- .map_single = swiotlb_map_single,
- .unmap_single = swiotlb_unmap_single,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
- .map_sg = swiotlb_map_sg,
- .unmap_sg = swiotlb_unmap_sg,
- .dma_supported_op = swiotlb_dma_supported,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
};
+void __init swiotlb_dma_init(void)
+{
+ dma_ops = &swiotlb_dma_ops;
+ swiotlb_init();
+}
+
void __init pci_swiotlb_init(void)
{
if (!iommu_detected) {
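The ia64_swiotlb_alloc_coherent() wrapper above forces GFP_DMA whenever the device's coherent mask is narrower than 64 bits. A hedged driver-side sketch of the effect; example_alloc() is illustrative, and a real driver would set the mask through the dma-mapping API rather than by direct assignment:

    #include <linux/dma-mapping.h>

    /* Illustrative: with a < 64-bit coherent mask, the wrapper above
     * ORs in GFP_DMA so the buffer comes from ZONE_DMA. */
    static void *example_alloc(struct device *dev, dma_addr_t *handle)
    {
        dev->coherent_dma_mask = DMA_32BIT_MASK;
        return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
    }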
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 0e499757309..8a06dc48059 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
return 1;
}
-static struct dentry_operations pfmfs_dentry_operations = {
+static const struct dentry_operations pfmfs_dentry_operations = {
.d_delete = pfmfs_delete_dentry,
};
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
* /proc/perfmon interface, for debug only
*/
-#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1)
+#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos)
return PFM_PROC_SHOW_HEADER;
}
- while (*pos <= NR_CPUS) {
+ while (*pos <= nr_cpu_ids) {
if (cpu_online(*pos - 1)) {
return (void *)*pos;
}
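The NR_CPUS to nr_cpu_ids conversions here and in the following hunks share one pattern: bound CPU loops by the runtime number of possible CPU ids instead of the compile-time maximum. A minimal sketch, assuming <linux/cpumask.h>:

    /* nr_cpu_ids is the runtime bound on CPU numbers; on a 4-way box
     * it is typically 4 even when CONFIG_NR_CPUS is 4096. */
    #include <linux/cpumask.h>

    static void visit_online_cpus(void (*fn)(int cpu))
    {
        int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
            if (cpu_online(cpu))
                fn(cpu);
    }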
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index c5716270514..5d7c0e5b9e7 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -413,7 +413,7 @@ ia64_load_extra (struct task_struct *task)
* so there is nothing to worry about.
*/
int
-copy_thread (int nr, unsigned long clone_flags,
+copy_thread(unsigned long clone_flags,
unsigned long user_stack_base, unsigned long user_stack_size,
struct task_struct *p, struct pt_regs *regs)
{
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index ecb9eb78d68..7053c55b764 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -317,7 +317,7 @@ retry:
}
n = data->cpu_check;
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
if (cpu_isset(n, data->cpu_event)) {
if (!cpu_online(n)) {
cpu_clear(n, data->cpu_event);
@@ -326,7 +326,7 @@ retry:
cpu = n;
break;
}
- if (++n == NR_CPUS)
+ if (++n == nr_cpu_ids)
n = 0;
}
@@ -337,7 +337,7 @@ retry:
/* for next read, start checking at next CPU */
data->cpu_check = cpu;
- if (++data->cpu_check == NR_CPUS)
+ if (++data->cpu_check == nr_cpu_ids)
data->cpu_check = 0;
snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 865af27c773..714066aeda7 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -52,6 +52,7 @@
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
+#include <asm/paravirt_patch.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -537,6 +538,7 @@ setup_arch (char **cmdline_p)
paravirt_arch_setup_early();
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+ paravirt_patch_apply();
*cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
@@ -730,10 +732,10 @@ static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
- while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
+ while (*pos < nr_cpu_ids && !cpu_online(*pos))
++*pos;
#endif
- return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
+ return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}
static void *
@@ -1016,8 +1018,7 @@ cpu_init (void)
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- if (current->mm)
- BUG();
+ BUG_ON(current->mm);
ia64_mmu_init(ia64_imva(cpu_data));
ia64_mca_cpu_init(ia64_imva(cpu_data));
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index da8f020d82c..2ea4199d9c5 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -166,11 +166,11 @@ send_IPI_allbutself (int op)
* Called with preemption disabled.
*/
static inline void
-send_IPI_mask(cpumask_t mask, int op)
+send_IPI_mask(const struct cpumask *mask, int op)
{
unsigned int cpu;
- for_each_cpu_mask(cpu, mask) {
+ for_each_cpu(cpu, mask) {
send_IPI_single(cpu, op);
}
}
@@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu)
send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, IPI_CALL_FUNC);
}
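Taking const struct cpumask * instead of a cpumask_t by value avoids copying a potentially multi-kilobyte mask through the stack on large-NR_CPUS builds. A hedged caller-side sketch; kick_mm_cpus() is illustrative:

    #include <linux/sched.h>

    /* Illustrative caller: masks are passed by pointer, never copied. */
    static void kick_mm_cpus(struct mm_struct *mm)
    {
        arch_send_call_function_ipi_mask(mm_cpumask(mm));
    }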
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 52290547c85..7700e23034b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -581,14 +581,14 @@ smp_build_cpu_map (void)
ia64_cpu_to_sapicid[0] = boot_cpu_id;
cpus_clear(cpu_present_map);
- cpu_set(0, cpu_present_map);
- cpu_set(0, cpu_possible_map);
+ set_cpu_present(0, true);
+ set_cpu_possible(0, true);
for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
sapicid = smp_boot_data.cpu_phys_id[i];
if (sapicid == boot_cpu_id)
continue;
- cpu_set(cpu, cpu_present_map);
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_present(cpu, true);
+ set_cpu_possible(cpu, true);
ia64_cpu_to_sapicid[cpu] = sapicid;
cpu++;
}
@@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus)
*/
if (!max_cpus) {
printk(KERN_INFO "SMP mode deactivated.\n");
- cpus_clear(cpu_online_map);
- cpus_clear(cpu_present_map);
- cpus_clear(cpu_possible_map);
- cpu_set(0, cpu_online_map);
- cpu_set(0, cpu_present_map);
- cpu_set(0, cpu_possible_map);
+ init_cpu_online(cpumask_of(0));
+ init_cpu_present(cpumask_of(0));
+ init_cpu_possible(cpumask_of(0));
return;
}
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index f0ebb342409..641c8b61c4f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -20,6 +20,7 @@
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
+#include <linux/platform_device.h>
#include <asm/machvec.h>
#include <asm/delay.h>
@@ -50,6 +51,15 @@ EXPORT_SYMBOL(last_cli_ip);
#endif
#ifdef CONFIG_PARAVIRT
+/* We need to define a real function for sched_clock, to override the
+ weak default version */
+unsigned long long sched_clock(void)
+{
+ return paravirt_sched_clock();
+}
+#endif
+
+#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(void)
{
@@ -405,6 +415,21 @@ static struct irqaction timer_irqaction = {
.name = "timer"
};
+static struct platform_device rtc_efi_dev = {
+ .name = "rtc-efi",
+ .id = -1,
+};
+
+static int __init rtc_init(void)
+{
+ if (platform_device_register(&rtc_efi_dev) < 0)
+ printk(KERN_ERR "unable to register rtc device...\n");
+
+ /* not necessarily an error */
+ return 0;
+}
+module_init(rtc_init);
+
void __init
time_init (void)
{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 10a7d47e851..4a95e86b9ac 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -169,6 +169,30 @@ SECTIONS
__end___mckinley_e9_bundles = .;
}
+#if defined(CONFIG_PARAVIRT)
+ . = ALIGN(16);
+ .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
+ {
+ __start_paravirt_bundles = .;
+ *(.paravirt_bundles)
+ __stop_paravirt_bundles = .;
+ }
+ . = ALIGN(16);
+ .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
+ {
+ __start_paravirt_insts = .;
+ *(.paravirt_insts)
+ __stop_paravirt_insts = .;
+ }
+ . = ALIGN(16);
+ .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
+ {
+ __start_paravirt_branches = .;
+ *(.paravirt_branches)
+ __stop_paravirt_branches = .;
+ }
+#endif
+
#if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */
. = ALIGN(16);
@@ -201,6 +225,12 @@ SECTIONS
__start_gate_section = .;
*(.data.gate)
__stop_gate_section = .;
+#ifdef CONFIG_XEN
+ . = ALIGN(PAGE_SIZE);
+ __xen_start_gate_section = .;
+ *(.data.gate.xen)
+ __xen_stop_gate_section = .;
+#endif
}
. = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
* kernel data
@@ -213,16 +243,9 @@ SECTIONS
{ *(.data.cacheline_aligned) }
/* Per-cpu data: */
- percpu : { } :percpu
. = ALIGN(PERCPU_PAGE_SIZE);
- __phys_per_cpu_start = .;
- .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
- {
- __per_cpu_start = .;
- *(.data.percpu)
- *(.data.percpu.shared_aligned)
- __per_cpu_end = .;
- }
+ PERCPU_VADDR(PERCPU_ADDR, :percpu)
+ __phys_per_cpu_start = __per_cpu_load;
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
* into percpu page size
*/
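On the C side, the __start_*/__stop_* markers defined in the linker script above become ordinary symbols bounding each paravirt table. An illustrative sketch of the declarations; the size helper is hypothetical:

    /* Illustrative: section bounds exported by the linker script. */
    extern char __start_paravirt_bundles[];
    extern char __stop_paravirt_bundles[];

    static unsigned long __init paravirt_bundles_size(void)
    {
        return __stop_paravirt_bundles - __start_paravirt_bundles;
    }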
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index f833a0b4188..0a2d6b86075 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -4,6 +4,10 @@
config HAVE_KVM
bool
+config HAVE_KVM_IRQCHIP
+ bool
+ default y
+
menuconfig VIRTUALIZATION
bool "Virtualization"
depends on HAVE_KVM || IA64
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
index c6786e8b1bf..c0785a72827 100644
--- a/arch/ia64/kvm/irq.h
+++ b/arch/ia64/kvm/irq.h
@@ -23,6 +23,8 @@
#ifndef __IRQ_H
#define __IRQ_H
+#include "lapic.h"
+
static inline int irqchip_in_kernel(struct kvm *kvm)
{
return 1;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 28f982045f2..28af6a731bb 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len)
int l;
for (l = 0; l < (len + 32); l += 32)
- ia64_fc(start + l);
+ ia64_fc((void *)(start + l));
ia64_sync_i();
ia64_srlz_i();
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_MP_STATE:
-
+ case KVM_CAP_IRQ_INJECT_STATUS:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
union ia64_lid lid;
int i;
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
if (kvm->vcpus[i]) {
lid.val = VCPU_LID(kvm->vcpus[i]);
if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
KVM_MP_STATE_UNINITIALIZED ||
vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
return ERR_PTR(-ENOMEM);
kvm_init_vm(kvm);
+ kvm->arch.online_vcpus = 0;
+
return kvm;
}
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_ioapic_init(kvm);
if (r)
goto out;
+ r = kvm_setup_default_irq_routing(kvm);
+ if (r) {
+ kfree(kvm->arch.vioapic);
+ goto out;
+ }
break;
+ case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
if (irqchip_in_kernel(kvm)) {
+ __s32 status;
mutex_lock(&kvm->lock);
- kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
mutex_unlock(&kvm->lock);
+ if (ioctl == KVM_IRQ_LINE_STATUS) {
+ irq_event.status = status;
+ if (copy_to_user(argp, &irq_event,
+ sizeof irq_event))
+ goto out;
+ }
r = 0;
}
break;
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Initialize itc offset for vcpus*/
itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail;
}
+ kvm->arch.online_vcpus++;
+
return vcpu;
fail:
return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -EINVAL;
}
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
- struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
{
return -EINVAL;
}
@@ -1421,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
+int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
+ struct kvm_ia64_vcpu_stack *stack)
+{
+ memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
+ struct kvm_ia64_vcpu_stack *stack)
+{
+ memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
+ sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
+
+ vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
+ return 0;
+}
+
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
@@ -1430,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+ unsigned int ioctl, unsigned long arg)
{
- return -EINVAL;
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ struct kvm_ia64_vcpu_stack *stack = NULL;
+ long r;
+
+ switch (ioctl) {
+ case KVM_IA64_VCPU_GET_STACK: {
+ struct kvm_ia64_vcpu_stack __user *user_stack;
+ void __user *first_p = argp;
+
+ r = -EFAULT;
+ if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+ goto out;
+
+ if (!access_ok(VERIFY_WRITE, user_stack,
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
+ "Illegal user destination address for stack\n");
+ goto out;
+ }
+ stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+ if (!stack) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
+ if (r)
+ goto out;
+
+ if (copy_to_user(user_stack, stack,
+ sizeof(struct kvm_ia64_vcpu_stack)))
+ goto out;
+
+ break;
+ }
+ case KVM_IA64_VCPU_SET_STACK: {
+ struct kvm_ia64_vcpu_stack __user *user_stack;
+ void __user *first_p = argp;
+
+ r = -EFAULT;
+ if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+ goto out;
+
+ if (!access_ok(VERIFY_READ, user_stack,
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
+ "Illegal user address for stack\n");
+ goto out;
+ }
+ stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+ if (!stack) {
+ r = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(stack, user_stack,
+ sizeof(struct kvm_ia64_vcpu_stack)))
+ goto out;
+
+ r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
+ break;
+ }
+
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ kfree(stack);
+ return r;
}
int kvm_arch_set_memory_region(struct kvm *kvm,
@@ -1472,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
}
long kvm_arch_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+ unsigned int ioctl, unsigned long arg)
{
return -EINVAL;
}
@@ -1737,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
struct kvm_vcpu *lvcpu = kvm->vcpus[0];
int i;
- for (i = 1; i < KVM_MAX_VCPUS; i++) {
+ for (i = 1; i < kvm->arch.online_vcpus; i++) {
if (!kvm->vcpus[i])
continue;
if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
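Note the calling convention of the new vcpu stack ioctls above: userspace passes the address of a pointer to its buffer, and the kernel reads that pointer with copy_from_user(..., sizeof(void *)). A hedged userspace sketch:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hedged sketch of the double indirection: the ioctl argument is
     * the address of a pointer to the caller's stack buffer. */
    static int get_vcpu_stack(int vcpu_fd, struct kvm_ia64_vcpu_stack *buf)
    {
        struct kvm_ia64_vcpu_stack *arg = buf;

        return ioctl(vcpu_fd, KVM_IA64_VCPU_GET_STACK, &arg);
    }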
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index cb7600bdff9..a8ae52ed563 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -227,6 +227,18 @@ static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
return result;
}
+static struct ia64_pal_retval pal_register_info(struct kvm_vcpu *vcpu)
+{
+
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ long in0, in1, in2, in3;
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ result.status = ia64_pal_register_info(in1, &result.v1, &result.v2);
+
+ return result;
+}
+
static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
{
@@ -268,8 +280,12 @@ static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
{
struct ia64_pal_retval result;
+ unsigned long in0, in1, in2, in3;
- INIT_PAL_STATUS_UNIMPLEMENTED(result);
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+ result.status = ia64_pal_vm_info(in1, in2,
+ (pal_tc_info_u_t *)&result.v1, &result.v2);
return result;
}
@@ -292,6 +308,108 @@ static void prepare_for_halt(struct kvm_vcpu *vcpu)
vcpu->arch.timer_fired = 0;
}
+static struct ia64_pal_retval pal_perf_mon_info(struct kvm_vcpu *vcpu)
+{
+ long status;
+ unsigned long in0, in1, in2, in3, r9;
+ unsigned long pm_buffer[16];
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ status = ia64_pal_perf_mon_info(pm_buffer,
+ (pal_perf_mon_info_u_t *) &r9);
+ if (status != 0) {
+ printk(KERN_DEBUG"PAL_PERF_MON_INFO fails ret=%ld\n", status);
+ } else {
+ if (in1)
+ memcpy((void *)in1, pm_buffer, sizeof(pm_buffer));
+ else {
+ status = PAL_STATUS_EINVAL;
+ printk(KERN_WARNING"Invalid parameters "
+ "for PAL call:0x%lx!\n", in0);
+ }
+ }
+ return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static struct ia64_pal_retval pal_halt_info(struct kvm_vcpu *vcpu)
+{
+ unsigned long in0, in1, in2, in3;
+ long status;
+ unsigned long res = 1000UL | (1000UL << 16) | (10UL << 32)
+ | (1UL << 61) | (1UL << 60);
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ if (in1) {
+ memcpy((void *)in1, &res, sizeof(res));
+ status = 0;
+	} else {
+ status = PAL_STATUS_EINVAL;
+ printk(KERN_WARNING"Invalid parameters "
+ "for PAL call:0x%lx!\n", in0);
+ }
+
+ return (struct ia64_pal_retval){status, 0, 0, 0};
+}
+
+static struct ia64_pal_retval pal_mem_attrib(struct kvm_vcpu *vcpu)
+{
+ unsigned long r9;
+ long status;
+
+ status = ia64_pal_mem_attrib(&r9);
+
+ return (struct ia64_pal_retval){status, r9, 0, 0};
+}
+
+static void remote_pal_prefetch_visibility(void *v)
+{
+ s64 trans_type = (s64)v;
+ ia64_pal_prefetch_visibility(trans_type);
+}
+
+static struct ia64_pal_retval pal_prefetch_visibility(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ unsigned long in0, in1, in2, in3;
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+ result.status = ia64_pal_prefetch_visibility(in1);
+ if (result.status == 0) {
+ /* Must be performed on all remote processors
+ in the coherence domain. */
+ smp_call_function(remote_pal_prefetch_visibility,
+ (void *)in1, 1);
+		/* Unnecessary on remote processors for other vcpus! */
+ result.status = 1;
+ }
+ return result;
+}
+
+static void remote_pal_mc_drain(void *v)
+{
+ ia64_pal_mc_drain();
+}
+
+static struct ia64_pal_retval pal_get_brand_info(struct kvm_vcpu *vcpu)
+{
+ struct ia64_pal_retval result = {0, 0, 0, 0};
+ unsigned long in0, in1, in2, in3;
+
+ kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+
+ if (in1 == 0 && in2) {
+ char brand_info[128];
+ result.status = ia64_pal_get_brand_info(brand_info);
+ if (result.status == PAL_STATUS_SUCCESS)
+ memcpy((void *)in2, brand_info, 128);
+ } else {
+ result.status = PAL_STATUS_REQUIRES_MEMORY;
+ printk(KERN_WARNING"Invalid parameters for "
+ "PAL call:0x%lx!\n", in0);
+ }
+
+ return result;
+}
+
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -300,14 +418,22 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
int ret = 1;
gr28 = kvm_get_pal_call_index(vcpu);
- /*printk("pal_call index:%lx\n",gr28);*/
switch (gr28) {
case PAL_CACHE_FLUSH:
result = pal_cache_flush(vcpu);
break;
+ case PAL_MEM_ATTRIB:
+ result = pal_mem_attrib(vcpu);
+ break;
case PAL_CACHE_SUMMARY:
result = pal_cache_summary(vcpu);
break;
+ case PAL_PERF_MON_INFO:
+ result = pal_perf_mon_info(vcpu);
+ break;
+ case PAL_HALT_INFO:
+ result = pal_halt_info(vcpu);
+ break;
case PAL_HALT_LIGHT:
{
INIT_PAL_STATUS_SUCCESS(result);
@@ -317,6 +443,16 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
break;
+ case PAL_PREFETCH_VISIBILITY:
+ result = pal_prefetch_visibility(vcpu);
+ break;
+ case PAL_MC_DRAIN:
+ result.status = ia64_pal_mc_drain();
+		/* FIXME: All vcpus likely call PAL_MC_DRAIN,
+		   which causes congestion. */
+ smp_call_function(remote_pal_mc_drain, NULL, 1);
+ break;
+
case PAL_FREQ_RATIOS:
result = pal_freq_ratios(vcpu);
break;
@@ -346,6 +482,9 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
INIT_PAL_STATUS_SUCCESS(result);
result.v1 = (1L << 32) | 1L;
break;
+ case PAL_REGISTER_INFO:
+ result = pal_register_info(vcpu);
+ break;
case PAL_VM_PAGE_SIZE:
result.status = ia64_pal_vm_page_size(&result.v0,
&result.v1);
@@ -365,12 +504,18 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
result.status = ia64_pal_version(
(pal_version_u_t *)&result.v0,
(pal_version_u_t *)&result.v1);
-
break;
case PAL_FIXED_ADDR:
result.status = PAL_STATUS_SUCCESS;
result.v0 = vcpu->vcpu_id;
break;
+ case PAL_BRAND_INFO:
+ result = pal_get_brand_info(vcpu);
+ break;
+ case PAL_GET_PSTATE:
+ case PAL_CACHE_SHARED_INFO:
+ INIT_PAL_STATUS_UNIMPLEMENTED(result);
+ break;
default:
INIT_PAL_STATUS_UNIMPLEMENTED(result);
printk(KERN_WARNING"kvm: Unsupported pal call,"
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 230eae482f3..b1dc80952d9 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -167,7 +167,6 @@ static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
return (rr1.val);
}
-
/*
* Set vIFA & vITIR & vIHA, when vPSR.ic =1
* Parameter:
@@ -222,8 +221,6 @@ void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}
-
-
/*
* Data Nested TLB Fault
* @ Data Nested TLB Vector
@@ -245,7 +242,6 @@ void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}
-
/*
* Data TLB Fault
* @ Data TLB vector
@@ -265,8 +261,6 @@ static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
/* If vPSR.ic, IFA, ITIR, IHA*/
set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
-
-
}
/*
@@ -279,7 +273,6 @@ void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
_vhpt_fault(vcpu, vadr);
}
-
/*
* VHPT Data Fault
* @ VHPT Translation vector
@@ -290,8 +283,6 @@ void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
_vhpt_fault(vcpu, vadr);
}
-
-
/*
* Deal with:
* General Exception vector
@@ -301,7 +292,6 @@ void _general_exception(struct kvm_vcpu *vcpu)
inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}
-
/*
* Illegal Operation Fault
* @ General Exception Vector
@@ -419,19 +409,16 @@ static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}
-
void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
__page_not_present(vcpu, vadr);
}
-
void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
__page_not_present(vcpu, vadr);
}
-
/* Deal with
* Data access rights vector
*/
@@ -563,22 +550,64 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
inject_guest_interruption(vcpu, vector);
}
+static unsigned long kvm_trans_pal_call_args(struct kvm_vcpu *vcpu,
+ unsigned long arg)
+{
+ struct thash_data *data;
+ unsigned long gpa, poff;
+
+ if (!is_physical_mode(vcpu)) {
+		/* Depends on the caller to provide the DTR or DTC mapping. */
+ data = vtlb_lookup(vcpu, arg, D_TLB);
+ if (data)
+ gpa = data->page_flags & _PAGE_PPN_MASK;
+ else {
+ data = vhpt_lookup(arg);
+ if (!data)
+ return 0;
+ gpa = data->gpaddr & _PAGE_PPN_MASK;
+ }
+
+ poff = arg & (PSIZE(data->ps) - 1);
+ arg = PAGEALIGN(gpa, data->ps) | poff;
+ }
+ arg = kvm_gpa_to_mpa(arg << 1 >> 1);
+
+ return (unsigned long)__va(arg);
+}
+
static void set_pal_call_data(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
+ unsigned long gr28 = vcpu_get_gr(vcpu, 28);
+ unsigned long gr29 = vcpu_get_gr(vcpu, 29);
+ unsigned long gr30 = vcpu_get_gr(vcpu, 30);
 	/* FIXME: For the static and stacked conventions, firmware
 	 * has put the parameters in gr28-gr31 before
 	 * breaking to the vmm. */
- p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
- p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
- p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+ switch (gr28) {
+ case PAL_PERF_MON_INFO:
+ case PAL_HALT_INFO:
+ p->u.pal_data.gr29 = kvm_trans_pal_call_args(vcpu, gr29);
+ p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+ break;
+ case PAL_BRAND_INFO:
+		p->u.pal_data.gr29 = gr29;
+ p->u.pal_data.gr30 = kvm_trans_pal_call_args(vcpu, gr30);
+ break;
+ default:
+		p->u.pal_data.gr29 = gr29;
+ p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+ }
+ p->u.pal_data.gr28 = gr28;
p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+
p->exit_reason = EXIT_REASON_PAL_CALL;
}
-static void set_pal_call_result(struct kvm_vcpu *vcpu)
+static void get_pal_call_result(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
@@ -606,7 +635,7 @@ static void set_sal_call_data(struct kvm_vcpu *vcpu)
p->exit_reason = EXIT_REASON_SAL_CALL;
}
-static void set_sal_call_result(struct kvm_vcpu *vcpu)
+static void get_sal_call_result(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
@@ -629,13 +658,13 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
if (iim == DOMN_PAL_REQUEST) {
set_pal_call_data(v);
vmm_transition(v);
- set_pal_call_result(v);
+ get_pal_call_result(v);
vcpu_increment_iip(v);
return;
} else if (iim == DOMN_SAL_REQUEST) {
set_sal_call_data(v);
vmm_transition(v);
- set_sal_call_result(v);
+ get_sal_call_result(v);
vcpu_increment_iip(v);
return;
}
@@ -703,7 +732,6 @@ void vhpi_detection(struct kvm_vcpu *vcpu)
}
}
-
void leave_hypervisor_tail(void)
{
struct kvm_vcpu *v = current_vcpu;
@@ -737,7 +765,6 @@ void leave_hypervisor_tail(void)
}
}
-
static inline void handle_lds(struct kvm_pt_regs *regs)
{
regs->cr_ipsr |= IA64_PSR_ED;
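One detail worth calling out in kvm_trans_pal_call_args() above: the `arg << 1 >> 1` shift pair clears bit 63 of the guest physical address (the uncached-window bit on ia64) before the gpa-to-mpa conversion. An equivalent, more explicit spelling (illustrative):

    /* Equivalent to "arg << 1 >> 1" on an unsigned 64-bit value. */
    static inline unsigned long strip_bit63(unsigned long arg)
    {
        return arg & ~(1UL << 63);
    }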
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index ecd526b5532..a18ee17b919 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -112,7 +112,6 @@ void switch_to_physical_rid(struct kvm_vcpu *vcpu)
return;
}
-
void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
unsigned long psr;
@@ -166,8 +165,6 @@ void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
return;
}
-
-
/*
* In physical mode, insert tc/tr for region 0 and 4 uses
* RID[0] and RID[4] which is for physical mode emulation.
@@ -269,7 +266,6 @@ static inline unsigned long fph_index(struct kvm_pt_regs *regs,
return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
-
/*
* The inverse of the above: given bspstore and the number of
* registers, calculate ar.bsp.
@@ -390,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
else
*rnat_addr = (*rnat_addr) & (~nat_mask);
- ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
+ ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
ia64_setreg(_IA64_REG_AR_RNAT, rnat);
}
local_irq_restore(psr);
@@ -811,12 +807,15 @@ static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
struct kvm_vcpu *v;
+ struct kvm *kvm;
int i;
long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
unsigned long vitv = VCPU(vcpu, itv);
+ kvm = (struct kvm *)KVM_VM_BASE;
+
if (vcpu->vcpu_id == 0) {
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
VMX(v, itc_offset) = itc_offset;
@@ -1039,8 +1038,6 @@ u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
return key;
}
-
-
void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long thash, vadr;
@@ -1050,7 +1047,6 @@ void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}
-
void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long tag, vadr;
@@ -1131,7 +1127,6 @@ int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
return IA64_NO_FAULT;
}
-
int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long r1, r3;
@@ -1154,7 +1149,6 @@ void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}
-
/************************************
* Insert/Purge translation register/cache
************************************/
@@ -1385,7 +1379,6 @@ void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
vcpu_set_itc(vcpu, r2);
}
-
void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long r1;
@@ -1393,8 +1386,9 @@ void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
r1 = vcpu_get_itc(vcpu);
vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}
+
/**************************************************************************
- struct kvm_vcpu*protection key register access routines
+ struct kvm_vcpu protection key register access routines
**************************************************************************/
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
@@ -1407,20 +1401,6 @@ void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
ia64_set_pkr(reg, val);
}
-
-unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
-{
- union ia64_rr rr, rr1;
-
- rr.val = vcpu_get_rr(vcpu, ifa);
- rr1.val = 0;
- rr1.ps = rr.ps;
- rr1.rid = rr.rid;
- return (rr1.val);
-}
-
-
-
/********************************
* Moves to privileged registers
********************************/
@@ -1464,8 +1444,6 @@ unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
return (IA64_NO_FAULT);
}
-
-
void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long r3, r2;
@@ -1510,8 +1488,6 @@ void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
vcpu_set_pkr(vcpu, r3, r2);
}
-
-
void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long r3, r1;
@@ -1557,7 +1533,6 @@ void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}
-
unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
/* FIXME: This could get called as a result of a rsvd-reg fault */
@@ -1609,7 +1584,6 @@ unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
return 0;
}
-
unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
unsigned long tgt = inst.M33.r1;
@@ -1633,8 +1607,6 @@ unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
return 0;
}
-
-
void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{
@@ -1776,9 +1748,6 @@ void vcpu_bsw1(struct kvm_vcpu *vcpu)
}
}
-
-
-
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
unsigned long ifs, psr;
@@ -1796,7 +1765,6 @@ void vcpu_rfi(struct kvm_vcpu *vcpu)
regs->cr_iip = VCPU(vcpu, iip);
}
-
/*
VPSR can't keep track of below bits of guest PSR
This function gets guest PSR
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index b2f12a562bd..042af92ced8 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
u64 itir, u64 ifa, int type);
extern void thash_purge_all(struct kvm_vcpu *v);
extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
void thash_init(struct thash_cb *hcb, u64 sz);
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
u64 arg4, u64 arg5, u64 arg6, u64 arg7);
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd5..2c2501f1315 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
unsigned long ps, gpaddr;
ps = itir_ps(itir);
+ rr.val = ia64_get_rr(ifa);
- gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
- (ifa & ((1UL << ps) - 1));
+ gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
+ (ifa & ((1UL << ps) - 1));
- rr.val = ia64_get_rr(ifa);
head = (struct thash_data *)ia64_thash(ifa);
head->etag = INVALID_TI_TAG;
ia64_mf();
@@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
ia64_itc(type, va, phy_pte, itir_ps(itir));
+ paravirt_dv_serialize_data();
ia64_set_psr(psr);
}
@@ -412,16 +413,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
/*
* Purge overlap TCs and then insert the new entry to emulate itc ops.
- * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
+ * Note: only TC entries can be purged and inserted.
*/
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
u64 ifa, int type)
{
u64 ps;
u64 phy_pte, io_mask, index;
union ia64_rr vrr, mrr;
- int ret = 0;
ps = itir_ps(itir);
vrr.val = vcpu_get_rr(v, ifa);
@@ -441,35 +440,29 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
phy_pte &= ~_PAGE_MA_MASK;
}
- if (pte & VTLB_PTE_IO)
- ret = 1;
-
vtlb_purge(v, ifa, ps);
vhpt_purge(v, ifa, ps);
- if (ps == mrr.ps) {
- if (!(pte&VTLB_PTE_IO)) {
- vhpt_insert(phy_pte, itir, ifa, pte);
- } else {
- vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(VMX(v, tc_regions), ifa);
- }
- } else if (ps > mrr.ps) {
+ if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
vtlb_insert(v, pte, itir, ifa);
vcpu_quick_region_set(VMX(v, tc_regions), ifa);
- if (!(pte&VTLB_PTE_IO))
- vhpt_insert(phy_pte, itir, ifa, pte);
- } else {
+ }
+ if (pte & VTLB_PTE_IO)
+ return;
+
+ if (ps >= mrr.ps)
+ vhpt_insert(phy_pte, itir, ifa, pte);
+ else {
u64 psr;
phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
ia64_itc(type, ifa, phy_pte, ps);
+ paravirt_dv_serialize_data();
ia64_set_psr(psr);
}
if (!(pte&VTLB_PTE_IO))
mark_pages_dirty(v, pte, ps);
- return ret;
}
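The restructured insert path reads most easily as a decision table; an illustrative restatement, not code from the patch:

    /*
     * thash_purge_and_insert() policy after this change:
     *   guest ps != machine ps, or an I/O page -> track entry in the vtlb
     *   I/O page                               -> stop; never in vhpt/TC
     *   guest ps >= machine ps                 -> insert into the vhpt
     *   guest ps <  machine ps                 -> direct itc insert
     */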
/*
@@ -509,7 +502,6 @@ void thash_purge_all(struct kvm_vcpu *v)
local_flush_tlb_all();
}
-
/*
* Lookup the hash table and its collision chain to find an entry
* covering this address rid:va or the entry.
@@ -517,7 +509,6 @@ void thash_purge_all(struct kvm_vcpu *v)
* INPUT:
* in: TLB format for both VHPT & TLB.
*/
-
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
struct thash_data *cch;
@@ -547,7 +538,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
return NULL;
}
-
/*
* Initialize internal control data before service.
*/
@@ -573,6 +563,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
u64 kvm_get_mpt_entry(u64 gpfn)
{
u64 *base = (u64 *) KVM_P2M_BASE;
+
+ if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn=%lx\n", gpfn);
+
return *(base + gpfn);
}
@@ -589,7 +583,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}
-
/*
* Fetch guest bundle code.
* INPUT:
@@ -631,7 +624,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
return IA64_NO_FAULT;
}
-
void kvm_init_vhpt(struct kvm_vcpu *v)
{
v->arch.vhpt.num = VHPT_NUM_ENTRIES;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 56e12903973..c0f3bee6904 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
+#include <asm/paravirt.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
static void __init
setup_gate (void)
{
+ void *gate_section;
struct page *page;
/*
@@ -266,10 +268,11 @@ setup_gate (void)
* headers etc. and once execute-only page to enable
* privilege-promotion via "epc":
*/
- page = virt_to_page(ia64_imva(__start_gate_section));
+ gate_section = paravirt_get_gate_section();
+ page = virt_to_page(ia64_imva(gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
- page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+ page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
@@ -633,8 +636,7 @@ mem_init (void)
#endif
#ifdef CONFIG_FLATMEM
- if (!mem_map)
- BUG();
+ BUG_ON(!mem_map);
max_mapnr = max_low_pfn;
#endif
@@ -667,8 +669,8 @@ mem_init (void)
* code can tell them apart.
*/
for (i = 0; i < NR_syscalls; ++i) {
- extern unsigned long fsyscall_table[NR_syscalls];
extern unsigned long sys_call_table[NR_syscalls];
+ unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index bd9818a36b4..b9f3d7bbb33 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
preempt_disable();
#ifdef CONFIG_SMP
- if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+ if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
platform_global_tlb_purge(mm, start, end, nbits);
preempt_enable();
return;
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed
index ba66ac2e4c6..e59809a3fc0 100644
--- a/arch/ia64/scripts/pvcheck.sed
+++ b/arch/ia64/scripts/pvcheck.sed
@@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g
s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g
s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr
s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g
+s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not be used directly\"/g
s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g
s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g
s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 0d4ffa4da1d..57f280dd9de 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
}
war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
- if (!war_list)
- BUG();
+ BUG_ON(!war_list);
SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
nasid, widget, __pa(war_list), 0, 0, 0 ,0);
@@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev)
sizeof(struct sn_flush_device_kernel *);
hubdev->hdi_flush_nasid_list.widget_p =
kzalloc(size, GFP_KERNEL);
- if (!hubdev->hdi_flush_nasid_list.widget_p)
- BUG();
+ BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p);
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
size = DEV_PER_WIDGET *
sizeof(struct sn_flush_device_kernel);
sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
- if (!sn_flush_device_kernel)
- BUG();
+ BUG_ON(!sn_flush_device_kernel);
dev_entry = sn_flush_device_kernel;
for (device = 0; device < DEV_PER_WIDGET;
device++, dev_entry++) {
size = sizeof(struct sn_flush_device_common);
dev_entry->common = kzalloc(size, GFP_KERNEL);
- if (!dev_entry->common)
- BUG();
+ BUG_ON(!dev_entry->common);
if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
status = sal_get_device_dmaflush_list(
hubdev->hdi_nasid, widget, device,
@@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus,
*/
controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
GFP_KERNEL);
- if (controller->platform_data == NULL)
- BUG();
+ BUG_ON(controller->platform_data == NULL);
sn_platform_data =
(struct sn_platform_data *) controller->platform_data;
sn_platform_data->provider_soft = provider_soft;
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index e2eb2da60f9..ee774c366a0 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct pci_controller *controller,
{
controller->window = kcalloc(2, sizeof(struct pci_window),
GFP_KERNEL);
- if (controller->window == NULL)
- BUG();
+ BUG_ON(controller->window == NULL);
controller->window[0].offset = legacy_io;
controller->window[0].resource.name = "legacy_io";
controller->window[0].resource.flags = IORESOURCE_IO;
@@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
idx = controller->windows;
new_count = controller->windows + count;
new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
- if (new_window == NULL)
- BUG();
+ BUG_ON(new_window == NULL);
if (controller->window) {
memcpy(new_window, controller->window,
sizeof(struct pci_window) * controller->windows);
@@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev)
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
- if (status)
- BUG(); /* Cannot get platform pci device information */
+ BUG_ON(status); /* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */
@@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
prom_bussoft_ptr = __va(prom_bussoft_ptr);
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
- if (!controller)
- BUG();
+ BUG_ON(!controller);
controller->segment = segment;
/*
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index ca553b0429c..81e428943d7 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = *cpu_mask;
+ cpumask_copy(irq_desc[irq].affinity, cpu_mask);
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 02c5b8a9fb6..e456f062f24 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -732,8 +732,7 @@ void __init build_cnode_tables(void)
kl_config_hdr_t *klgraph_header;
nasid = cnodeid_to_nasid(node);
klgraph_header = ia64_sn_get_klconfig_addr(nasid);
- if (klgraph_header == NULL)
- BUG();
+ BUG_ON(klgraph_header == NULL);
brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
while (brd) {
if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
@@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
{
long cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu;
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 4dcce3d0e04..e6332881864 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -225,7 +225,6 @@ static struct proc_dir_entry *sgi_prominfo_entry;
int __init prominfo_init(void)
{
struct proc_dir_entry **entp;
- struct proc_dir_entry *p;
cnodeid_t cnodeid;
unsigned long nasid;
int size;
@@ -246,14 +245,10 @@ int __init prominfo_init(void)
sprintf(name, "node%d", cnodeid);
*entp = proc_mkdir(name, sgi_prominfo_entry);
nasid = cnodeid_to_nasid(cnodeid);
- p = create_proc_read_entry("fit", 0, *entp, read_fit_entry,
+ create_proc_read_entry("fit", 0, *entp, read_fit_entry,
(void *)nasid);
- if (p)
- p->owner = THIS_MODULE;
- p = create_proc_read_entry("version", 0, *entp,
+ create_proc_read_entry("version", 0, *entp,
read_version_entry, (void *)nasid);
- if (p)
- p->owner = THIS_MODULE;
entp++;
}
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index e585f9a2afb..1176506b2ba 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
unsigned long itc;
itc = ia64_get_itc();
- smp_flush_tlb_cpumask(mm->cpu_vm_mask);
+ smp_flush_tlb_cpumask(*mm_cpumask(mm));
itc = ia64_get_itc() - itc;
__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
__get_cpu_var(ptcstats).shub_ipi_flushes++;
@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
nodes_clear(nodes_flushed);
i = 0;
- for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+ for_each_cpu(cpu, mm_cpumask(mm)) {
cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed);
lcpu = cpu;
@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu)
static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{
- if (*offset < NR_CPUS)
+ if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{
(*offset)++;
- if (*offset < NR_CPUS)
+ if (*offset < nr_cpu_ids)
return offset;
return NULL;
}
@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
}
- if (cpu < NR_CPUS && cpu_online(cpu)) {
+ if (cpu < nr_cpu_ids && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu);
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
@@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void)
proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
NULL, &proc_sn2_ptc_operations);
- if (!&proc_sn2_ptc_operations) {
+ if (!proc_sn2_ptc) {
printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
return -EINVAL;
}
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index be339477f90..9e6491cf72b 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
/* get it's interconnect topology */
sz = op->ports * sizeof(struct sn_hwperf_port_info);
- if (sz > sizeof(ptdata))
- BUG();
+ BUG_ON(sz > sizeof(ptdata));
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
(u64)&ptdata, 0, 0, NULL);
@@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
if (router && (!found_cpu || !found_mem)) {
/* search for a node connected to the same router */
sz = router->ports * sizeof(struct sn_hwperf_port_info);
- if (sz > sizeof(ptdata))
- BUG();
+ BUG_ON(sz > sizeof(ptdata));
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, router->id, sz,
(u64)&ptdata, 0, 0, NULL);
@@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
if (cpu != SN_HWPERF_ARG_ANY_CPU) {
- if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
r = -EINVAL;
goto out;
}
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 863f5017baa..8c130e8f00e 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
-#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
@@ -31,7 +31,7 @@
* this function. Of course, SN only supports devices that have 32 or more
* address bits when using the PMU.
*/
-int sn_dma_supported(struct device *dev, u64 mask)
+static int sn_dma_supported(struct device *dev, u64 mask)
{
BUG_ON(dev->bus != &pci_bus_type);
@@ -39,7 +39,6 @@ int sn_dma_supported(struct device *dev, u64 mask)
return 0;
return 1;
}
-EXPORT_SYMBOL(sn_dma_supported);
/**
* sn_dma_set_mask - set the DMA mask
@@ -75,8 +74,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
* queue for a SCSI controller). See Documentation/DMA-API.txt for
* more information.
*/
-void *sn_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t flags)
+static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, gfp_t flags)
{
void *cpuaddr;
unsigned long phys_addr;
@@ -124,7 +123,6 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
return cpuaddr;
}
-EXPORT_SYMBOL(sn_dma_alloc_coherent);
/**
* sn_pci_free_coherent - free memory associated with coherent DMAable region
@@ -136,8 +134,8 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
* Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
* any associated IOMMU mappings.
*/
-void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -147,7 +145,6 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
-EXPORT_SYMBOL(sn_dma_free_coherent);
/**
* sn_dma_map_single_attrs - map a single page for DMA
@@ -173,10 +170,12 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
* TODO: simplify our interface;
* figure out how to save dmamap handle so can use two step.
*/
-dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
- size_t size, int direction,
- struct dma_attrs *attrs)
+static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
+ void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -201,7 +200,6 @@ dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
}
return dma_addr;
}
-EXPORT_SYMBOL(sn_dma_map_single_attrs);
/**
 * sn_dma_unmap_single_attrs - unmap a DMA mapped page
@@ -215,21 +213,20 @@ EXPORT_SYMBOL(sn_dma_map_single_attrs);
* by @dma_handle into the coherence domain. On SN, we're always cache
* coherent, so we just need to free any ATEs associated with this mapping.
*/
-void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction,
- struct dma_attrs *attrs)
+static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
- provider->dma_unmap(pdev, dma_addr, direction);
+ provider->dma_unmap(pdev, dma_addr, dir);
}
-EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
/**
- * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
* @dev: device to unmap
* @sg: scatterlist to unmap
* @nhwentries: number of scatterlist entries
@@ -238,9 +235,9 @@ EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
*
* Unmap a set of streaming mode DMA translations.
*/
-void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nhwentries, int direction,
- struct dma_attrs *attrs)
+static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nhwentries, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -250,15 +247,14 @@ void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
BUG_ON(dev->bus != &pci_bus_type);
for_each_sg(sgl, sg, nhwentries, i) {
- provider->dma_unmap(pdev, sg->dma_address, direction);
+ provider->dma_unmap(pdev, sg->dma_address, dir);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
}
-EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
/**
- * sn_dma_map_sg_attrs - map a scatterlist for DMA
+ * sn_dma_map_sg - map a scatterlist for DMA
* @dev: device to map for
* @sg: scatterlist to map
* @nhwentries: number of entries
@@ -272,8 +268,9 @@ EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
*
* Maps each entry of @sg for DMA.
*/
-int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nhwentries, int direction, struct dma_attrs *attrs)
+static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nhwentries, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sgl, *sg;
@@ -310,8 +307,7 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
* Free any successfully allocated entries.
*/
if (i > 0)
- sn_dma_unmap_sg_attrs(dev, saved_sg, i,
- direction, attrs);
+ sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
return 0;
}
@@ -320,41 +316,36 @@ int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
return nhwentries;
}
-EXPORT_SYMBOL(sn_dma_map_sg_attrs);
-void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
+static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
{
BUG_ON(dev->bus != &pci_bus_type);
}
-EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
-void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
+static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction dir)
{
BUG_ON(dev->bus != &pci_bus_type);
}
-EXPORT_SYMBOL(sn_dma_sync_single_for_device);
-void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
+static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
{
BUG_ON(dev->bus != &pci_bus_type);
}
-EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
-void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, int direction)
+static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
{
BUG_ON(dev->bus != &pci_bus_type);
}
-EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
-int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
-EXPORT_SYMBOL(sn_dma_mapping_error);
u64 sn_dma_get_required_mask(struct device *dev)
{
@@ -471,3 +462,23 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
out:
return ret;
}
+
+static struct dma_map_ops sn_dma_ops = {
+ .alloc_coherent = sn_dma_alloc_coherent,
+ .free_coherent = sn_dma_free_coherent,
+ .map_page = sn_dma_map_page,
+ .unmap_page = sn_dma_unmap_page,
+ .map_sg = sn_dma_map_sg,
+ .unmap_sg = sn_dma_unmap_sg,
+ .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
+ .sync_single_for_device = sn_dma_sync_single_for_device,
+ .sync_sg_for_device = sn_dma_sync_sg_for_device,
+ .mapping_error = sn_dma_mapping_error,
+ .dma_supported = sn_dma_supported,
+};
+
+void sn_dma_init(void)
+{
+ dma_ops = &sn_dma_ops;
+}
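
With the ops table registered, the generic DMA API dispatches through dma_ops
instead of calling the SN-specific entry points directly, which is why the
sn_dma_* functions can become static and lose their EXPORT_SYMBOL()s. A
minimal sketch of that dispatch, assuming the dma_map_ops layout of this era
(the wrapper name is illustrative, not the real inline from dma-mapping.h):

	static inline dma_addr_t sketch_dma_map_page(struct device *dev,
						     struct page *page,
						     unsigned long offset,
						     size_t size,
						     enum dma_data_direction dir,
						     struct dma_attrs *attrs)
	{
		/* dma_ops points at sn_dma_ops once sn_dma_init() has run */
		return dma_ops->map_page(dev, page, offset, size, dir, attrs);
	}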
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 060df4aa991..c659ad5613a 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr)
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
- if (!hubinfo) {
- BUG();
- }
+ BUG_ON(!hubinfo);
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
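
The conversion above is purely mechanical, since BUG_ON() is just the guarded
form of BUG(). Roughly (a simplified sketch; the real kernel macro also wraps
the condition in unlikely()):

	#define BUG_ON(condition) do { if (condition) BUG(); } while (0)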
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
index 0ad0224693d..e6f4a0a7422 100644
--- a/arch/ia64/xen/Makefile
+++ b/arch/ia64/xen/Makefile
@@ -3,14 +3,29 @@
#
obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
- hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o
+ hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
+ gate-data.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o
+# The gate DSO image is built using a special linker script.
+include $(srctree)/arch/ia64/kernel/Makefile.gate
+
+# tell the build these objects are compiled for xen
+CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
+AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
+
+# use the same file as native.
+$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
+ $(call if_changed_dep,as_o_S)
+$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
+ $(call if_changed_dep,cpp_lds_S)
+
+
AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
# xen multi compile
-ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S
+ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
obj-y += $(ASM_PARAVIRT_OBJS)
define paravirtualized_xen
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
new file mode 100644
index 00000000000..7d4830afc91
--- /dev/null
+++ b/arch/ia64/xen/gate-data.S
@@ -0,0 +1,3 @@
+ .section .data.gate.xen, "aw"
+
+ .incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb64a9..e32dae444dd 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
#include <asm/intrinsics.h>
#include <asm/xen/privop.h>
+#ifdef __INTEL_COMPILER
/*
* Hypercalls without parameter.
*/
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
br.ret.sptk.many rp
;;
END(xen_set_rr0_to_rr4)
+#endif
GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index 68d6204c3f1..fb833269017 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void)
} while (unlikely(ret != lcycle));
}
+/* based on xen_sched_clock() in arch/x86/xen/time.c. */
+/*
+ * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If that can't be defined,
+ * similar logic should be implemented here.
+ */
+/*
+ * Xen sched_clock implementation. Returns the number of unstolen
+ * nanoseconds, i.e. the time the VCPU spent in the RUNNING and
+ * BLOCKED states.
+ */
+static unsigned long long xen_sched_clock(void)
+{
+ struct vcpu_runstate_info runstate;
+
+ unsigned long long now;
+ unsigned long long offset;
+ unsigned long long ret;
+
+ /*
+ * Ideally sched_clock should be called on a per-cpu basis
+ * anyway, so preemption should already be disabled, but that's
+ * not the current practice.
+ */
+ preempt_disable();
+
+ /*
+ * Both ia64_native_sched_clock() and Xen's runstate are
+ * based on ar.itc, so the difference between them is meaningful.
+ */
+ now = ia64_native_sched_clock();
+
+ get_runstate_snapshot(&runstate);
+
+ WARN_ON(runstate.state != RUNSTATE_running);
+
+ offset = 0;
+ if (now > runstate.state_entry_time)
+ offset = now - runstate.state_entry_time;
+ ret = runstate.time[RUNSTATE_blocked] +
+ runstate.time[RUNSTATE_running] +
+ offset;
+
+ preempt_enable();
+
+ return ret;
+}
+
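
To make the "unstolen time" arithmetic concrete, a toy calculation with
made-up values (illustration only; none of these numbers come from real Xen
state):

	static unsigned long long sketch_unstolen_ns(void)
	{
		unsigned long long running = 900; /* time[RUNSTATE_running] */
		unsigned long long blocked = 50;  /* time[RUNSTATE_blocked] */
		unsigned long long entry = 950;   /* state_entry_time */
		unsigned long long now = 1000;    /* ia64_native_sched_clock() */
		unsigned long long offset = now > entry ? now - entry : 0;

		return running + blocked + offset;	/* 1000 ns */
	}

Time spent in RUNSTATE_runnable or RUNSTATE_offline (i.e. stolen by the
hypervisor) never enters the sum, so the scheduler only sees time the VCPU
could actually use.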
struct pv_time_ops xen_time_ops __initdata = {
.init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
.do_steal_accounting = xen_do_steal_accounting,
.clocksource_resume = xen_itc_jitter_data_reset,
+ .sched_clock = xen_sched_clock,
};
/* Called after suspend, to resume time. */
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index 936cff3c96e..5e2270a999f 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pm.h>
+#include <linux/unistd.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/xencomm.h>
@@ -153,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
xen_setup_vcpu_info_placement();
}
+#ifdef ASM_SUPPORTED
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
+#endif
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type);
+
static const struct pv_init_ops xen_init_ops __initconst = {
.banner = xen_banner,
@@ -163,6 +171,53 @@ static const struct pv_init_ops xen_init_ops __initconst = {
.arch_setup_nomca = xen_arch_setup_nomca,
.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
+#ifdef ASM_SUPPORTED
+ .patch_bundle = xen_patch_bundle,
+#endif
+ .patch_branch = xen_patch_branch,
+};
+
+/***************************************************************************
+ * pv_fsys_data
+ * addresses for fsys
+ */
+
+extern unsigned long xen_fsyscall_table[NR_syscalls];
+extern char xen_fsys_bubble_down[];
+struct pv_fsys_data xen_fsys_data __initdata = {
+ .fsyscall_table = (unsigned long *)xen_fsyscall_table,
+ .fsys_bubble_down = (void *)xen_fsys_bubble_down,
+};
+
+/***************************************************************************
+ * pv_patchdata
+ * patchdata addresses
+ */
+
+#define DECLARE(name) \
+ extern unsigned long __xen_start_gate_##name##_patchlist[]; \
+ extern unsigned long __xen_end_gate_##name##_patchlist[]
+
+DECLARE(fsyscall);
+DECLARE(brl_fsys_bubble_down);
+DECLARE(vtop);
+DECLARE(mckinley_e9);
+
+extern unsigned long __xen_start_gate_section[];
+
+#define ASSIGN(name) \
+ .start_##name##_patchlist = \
+ (unsigned long)__xen_start_gate_##name##_patchlist, \
+ .end_##name##_patchlist = \
+ (unsigned long)__xen_end_gate_##name##_patchlist
+
+static struct pv_patchdata xen_patchdata __initdata = {
+ ASSIGN(fsyscall),
+ ASSIGN(brl_fsys_bubble_down),
+ ASSIGN(vtop),
+ ASSIGN(mckinley_e9),
+
+ .gate_section = (void*)__xen_start_gate_section,
};
/***************************************************************************
@@ -170,6 +225,76 @@ static const struct pv_init_ops xen_init_ops __initconst = {
* intrinsics hooks.
*/
+#ifndef ASM_SUPPORTED
+static void
+xen_set_itm_with_offset(unsigned long val)
+{
+ /* ia64_cpu_local_tick() calls this with interrupts enabled. */
+ /* WARN_ON(!irqs_disabled()); */
+ xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
+}
+
+static unsigned long
+xen_get_itm_with_offset(void)
+{
+ /* currently unused */
+ printk(KERN_DEBUG "%s is called.\n", __func__);
+
+ WARN_ON(!irqs_disabled());
+ return ia64_native_getreg(_IA64_REG_CR_ITM) +
+ XEN_MAPPEDREGS->itc_offset;
+}
+
+/* ia64_set_itc() is only called by
+ * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
+ * So XEN_MAPPEDREGS->itc_offset can be considered almost constant.
+ */
+static void
+xen_set_itc(unsigned long val)
+{
+ unsigned long mitc;
+
+ WARN_ON(!irqs_disabled());
+ mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
+ XEN_MAPPEDREGS->itc_offset = val - mitc;
+ XEN_MAPPEDREGS->itc_last = val;
+}
+
+static unsigned long
+xen_get_itc(void)
+{
+ unsigned long res;
+ unsigned long itc_offset;
+ unsigned long itc_last;
+ unsigned long ret_itc_last;
+
+ itc_offset = XEN_MAPPEDREGS->itc_offset;
+ do {
+ itc_last = XEN_MAPPEDREGS->itc_last;
+ res = ia64_native_getreg(_IA64_REG_AR_ITC);
+ res += itc_offset;
+ if (itc_last >= res)
+ res = itc_last + 1;
+ ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
+ itc_last, res);
+ } while (unlikely(ret_itc_last != itc_last));
+ return res;
+
+#if 0
+ /* ia64_itc_udelay() calls ia64_get_itc() with interrupts enabled.
+ Should it be paravirtualized instead? */
+ WARN_ON(!irqs_disabled());
+ itc_offset = XEN_MAPPEDREGS->itc_offset;
+ itc_last = XEN_MAPPEDREGS->itc_last;
+ res = ia64_native_getreg(_IA64_REG_AR_ITC);
+ res += itc_offset;
+ if (itc_last >= res)
+ res = itc_last + 1;
+ XEN_MAPPEDREGS->itc_last = res;
+ return res;
+#endif
+}
+
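The loop in xen_get_itc() is an instance of a common lockless pattern: keep a
shared high-water mark so concurrent readers never observe the clock going
backwards. A minimal user-space sketch of the same technique, using GCC's
__sync_val_compare_and_swap() in place of the kernel's cmpxchg(); the
read_counter() stub here is a hypothetical stand-in for ar.itc plus
itc_offset:

	#include <stdint.h>

	static uint64_t itc_last;	/* shared high-water mark */
	static uint64_t raw_counter;	/* stand-in for ar.itc + itc_offset */

	static uint64_t read_counter(void)
	{
		return ++raw_counter;
	}

	uint64_t monotonic_read(void)
	{
		uint64_t last, res, prev;

		do {
			last = itc_last;
			res = read_counter();
			if (last >= res)	/* never go backwards */
				res = last + 1;
			/* publish res unless another reader raced past us */
			prev = __sync_val_compare_and_swap(&itc_last,
							   last, res);
		} while (prev != last);
		return res;
	}
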
static void xen_setreg(int regnum, unsigned long val)
{
switch (regnum) {
@@ -181,11 +306,14 @@ static void xen_setreg(int regnum, unsigned long val)
xen_set_eflag(val);
break;
#endif
+ case _IA64_REG_AR_ITC:
+ xen_set_itc(val);
+ break;
case _IA64_REG_CR_TPR:
xen_set_tpr(val);
break;
case _IA64_REG_CR_ITM:
- xen_set_itm(val);
+ xen_set_itm_with_offset(val);
break;
case _IA64_REG_CR_EOI:
xen_eoi(val);
@@ -209,6 +337,12 @@ static unsigned long xen_getreg(int regnum)
res = xen_get_eflag();
break;
#endif
+ case _IA64_REG_AR_ITC:
+ res = xen_get_itc();
+ break;
+ case _IA64_REG_CR_ITM:
+ res = xen_get_itm_with_offset();
+ break;
case _IA64_REG_CR_IVR:
res = xen_get_ivr();
break;
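
For orientation, a hedged sketch of how a register access reaches these
handlers once the Xen table is installed (names are illustrative; the real
dispatch goes through the ia64_getreg()/ia64_setreg() paravirt plumbing):

	/* simplified shape of the relevant pv_cpu_ops slots */
	struct pv_cpu_ops_sketch {
		unsigned long (*getreg)(int regnum);
		void (*setreg)(int regnum, unsigned long val);
	};

	static struct pv_cpu_ops_sketch pv_cpu;	/* Xen handlers under Xen */

	static unsigned long sketch_getreg(int regnum)
	{
		return pv_cpu.getreg(regnum);	/* -> xen_getreg() */
	}

	static void sketch_setreg(int regnum, unsigned long val)
	{
		pv_cpu.setreg(regnum, val);	/* -> xen_setreg() */
	}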
@@ -259,8 +393,417 @@ xen_intrin_local_irq_restore(unsigned long mask)
else
xen_rsm_i();
}
+#else
+#define __DEFINE_FUNC(name, code) \
+ extern const char xen_ ## name ## _direct_start[]; \
+ extern const char xen_ ## name ## _direct_end[]; \
+ asm (".align 32\n" \
+ ".proc xen_" #name "\n" \
+ "xen_" #name ":\n" \
+ "xen_" #name "_direct_start:\n" \
+ code \
+ "xen_" #name "_direct_end:\n" \
+ "br.cond.sptk.many b6\n" \
+ ".endp xen_" #name "\n")
+
+#define DEFINE_VOID_FUNC0(name, code) \
+ extern void \
+ xen_ ## name (void); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1(name, code) \
+ extern void \
+ xen_ ## name (unsigned long arg); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC1_VOID(name, code) \
+ extern void \
+ xen_ ## name (void *arg); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_VOID_FUNC2(name, code) \
+ extern void \
+ xen_ ## name (unsigned long arg0, \
+ unsigned long arg1); \
+ __DEFINE_FUNC(name, code)
-static const struct pv_cpu_ops xen_cpu_ops __initdata = {
+#define DEFINE_FUNC0(name, code) \
+ extern unsigned long \
+ xen_ ## name (void); \
+ __DEFINE_FUNC(name, code)
+
+#define DEFINE_FUNC1(name, type, code) \
+ extern unsigned long \
+ xen_ ## name (type arg); \
+ __DEFINE_FUNC(name, code)
+
+#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
+
+/*
+ * static void xen_set_itm_with_offset(unsigned long val)
+ * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
+ */
+/* 2 bundles */
+DEFINE_VOID_FUNC1(set_itm_with_offset,
+ "mov r2 = " __stringify(XSI_BASE) " + "
+ __stringify(XSI_ITC_OFFSET_OFS) "\n"
+ ";;\n"
+ "ld8 r3 = [r2]\n"
+ ";;\n"
+ "sub r8 = r8, r3\n"
+ "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
+
+/*
+ * static unsigned long xen_get_itm_with_offset(void)
+ * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
+ */
+/* 2 bundles */
+DEFINE_FUNC0(get_itm_with_offset,
+ "mov r2 = " __stringify(XSI_BASE) " + "
+ __stringify(XSI_ITC_OFFSET_OFS) "\n"
+ ";;\n"
+ "ld8 r3 = [r2]\n"
+ "mov r8 = cr.itm\n"
+ ";;\n"
+ "add r8 = r8, r2\n");
+
+/*
+ * static void xen_set_itc(unsigned long val)
+ * unsigned long mitc;
+ *
+ * WARN_ON(!irqs_disabled());
+ * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
+ * XEN_MAPPEDREGS->itc_offset = val - mitc;
+ * XEN_MAPPEDREGS->itc_last = val;
+ */
+/* 2 bundles */
+DEFINE_VOID_FUNC1(set_itc,
+ "mov r2 = " __stringify(XSI_BASE) " + "
+ __stringify(XSI_ITC_LAST_OFS) "\n"
+ "mov r3 = ar.itc\n"
+ ";;\n"
+ "sub r3 = r8, r3\n"
+ "st8 [r2] = r8, "
+ __stringify(XSI_ITC_OFFSET_OFS) " - "
+ __stringify(XSI_ITC_LAST_OFS) "\n"
+ ";;\n"
+ "st8 [r2] = r3\n");
+
+/*
+ * static unsigned long xen_get_itc(void)
+ * unsigned long res;
+ * unsigned long itc_offset;
+ * unsigned long itc_last;
+ * unsigned long ret_itc_last;
+ *
+ * itc_offset = XEN_MAPPEDREGS->itc_offset;
+ * do {
+ * itc_last = XEN_MAPPEDREGS->itc_last;
+ * res = ia64_native_getreg(_IA64_REG_AR_ITC);
+ * res += itc_offset;
+ * if (itc_last >= res)
+ * res = itc_last + 1;
+ * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
+ * itc_last, res);
+ * } while (unlikely(ret_itc_last != itc_last));
+ * return res;
+ */
+/* 5 bundles */
+DEFINE_FUNC0(get_itc,
+ "mov r2 = " __stringify(XSI_BASE) " + "
+ __stringify(XSI_ITC_OFFSET_OFS) "\n"
+ ";;\n"
+ "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
+ __stringify(XSI_ITC_OFFSET_OFS) "\n"
+ /* r9 = itc_offset */
+ /* r2 = XSI_ITC_LAST */
+ "888:\n"
+ "mov r8 = ar.itc\n" /* res = ar.itc */
+ ";;\n"
+ "ld8 r3 = [r2]\n" /* r3 = itc_last */
+ "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */
+ ";;\n"
+ "cmp.gtu p6, p0 = r3, r8\n"
+ ";;\n"
+ "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */
+ ";;\n"
+ "mov ar.ccv = r8\n"
+ ";;\n"
+ "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
+ ";;\n"
+ "cmp.ne p6, p0 = r10, r3\n"
+ "(p6) hint @pause\n"
+ "(p6) br.cond.spnt 888b\n");
+
+DEFINE_VOID_FUNC1_VOID(fc,
+ "break " __stringify(HYPERPRIVOP_FC) "\n");
+
+/*
+ * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
+ * masked_addr = *psr_i_addr_addr
+ * pending_intr_addr = masked_addr - 1
+ * if (val & IA64_PSR_I) {
+ * masked = *masked_addr
+ * *masked_addr = 0, i.e. xen_set_virtual_psr_i(1)
+ * compiler barrier
+ * if (masked) {
+ * uint8_t pending = *pending_intr_addr;
+ * if (pending)
+ * XEN_HYPER_SSM_I
+ * }
+ * } else {
+ * *masked_addr = 1, i.e. xen_set_virtual_psr_i(0)
+ * }
+ */
+/* 6 bundles */
+DEFINE_VOID_FUNC1(intrin_local_irq_restore,
+ /* r8 = input value: 0 or IA64_PSR_I
+ * p6 = (flags & IA64_PSR_I)
+ * = if clause
+ * p7 = !(flags & IA64_PSR_I)
+ * = else clause
+ */
+ "cmp.ne p6, p7 = r8, r0\n"
+ "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+ ";;\n"
+ /* r9 = XEN_PSR_I_ADDR */
+ "ld8 r9 = [r9]\n"
+ ";;\n"
+
+ /* r10 = masked previous value */
+ "(p6) ld1.acq r10 = [r9]\n"
+ ";;\n"
+
+ /* p8 = masked: was the interrupt masked previously? */
+ "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
+
+ /* p7 = else clause */
+ "(p7) mov r11 = 1\n"
+ ";;\n"
+ /* masked = 1 */
+ "(p7) st1.rel [r9] = r11\n"
+
+ /* p6 = if clause */
+ /* masked = 0
+ * r9 = masked_addr - 1
+ * = pending_intr_addr
+ */
+ "(p8) st1.rel [r9] = r0, -1\n"
+ ";;\n"
+ /* r8 = pending_intr */
+ "(p8) ld1.acq r11 = [r9]\n"
+ ";;\n"
+ /* p9 = interrupt pending? */
+ "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
+ ";;\n"
+ "(p10) mf\n"
+ /* issue hypercall to trigger interrupt */
+ "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
+
+DEFINE_VOID_FUNC2(ptcga,
+ "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
+DEFINE_VOID_FUNC2(set_rr,
+ "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
+
+/*
+ * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
+ * tmp = *tmp;
+ * tmp = *tmp;
+ * psr_i = tmp ? 0 : IA64_PSR_I;
+ */
+/* 4 bundles */
+DEFINE_FUNC0(get_psr_i,
+ "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+ ";;\n"
+ "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */
+ "mov r8 = 0\n" /* psr_i = 0 */
+ ";;\n"
+ "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */
+ ";;\n"
+ "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */
+ ";;\n"
+ "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
+
+DEFINE_FUNC1(thash, unsigned long,
+ "break " __stringify(HYPERPRIVOP_THASH) "\n");
+DEFINE_FUNC1(get_cpuid, int,
+ "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
+DEFINE_FUNC1(get_pmd, int,
+ "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
+DEFINE_FUNC1(get_rr, unsigned long,
+ "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
+
+/*
+ * void xen_privop_ssm_i(void)
+ *
+ * int masked = !xen_get_virtual_psr_i();
+ * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
+ * xen_set_virtual_psr_i(1)
+ * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
+ * // compiler barrier
+ * if (masked) {
+ * uint8_t* pend_int_addr =
+ * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
+ * uint8_t pending = *pend_int_addr;
+ * if (pending)
+ * XEN_HYPER_SSM_I
+ * }
+ */
+/* 4 bundles */
+DEFINE_VOID_FUNC0(ssm_i,
+ "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+ ";;\n"
+ "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */
+ ";;\n"
+ "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */
+ ";;\n"
+ "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
+ * r8 = XEN_PSR_I_ADDR - 1
+ * = pend_int_addr
+ */
+ "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
+ * previously interrupt
+ * masked?
+ */
+ ";;\n"
+ "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */
+ ";;\n"
+ "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
+ ";;\n"
+ /* issue hypercall to get interrupt */
+ "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
+ ";;\n");
+
+/*
+ * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
+ * = XEN_PSR_I_ADDR_ADDR;
+ * psr_i_addr = *psr_i_addr_addr;
+ * *psr_i_addr = 1;
+ */
+/* 2 bundles */
+DEFINE_VOID_FUNC0(rsm_i,
+ "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+ /* r8 = XEN_PSR_I_ADDR_ADDR */
+ "mov r9 = 1\n"
+ ";;\n"
+ "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */
+ ";;\n"
+ "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */
+
+extern void
+xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
+ unsigned long val2, unsigned long val3,
+ unsigned long val4);
+__DEFINE_FUNC(set_rr0_to_rr4,
+ "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
+
+
+extern unsigned long xen_getreg(int regnum);
+#define __DEFINE_GET_REG(id, privop) \
+ "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
+ ";;\n" \
+ "cmp.eq p6, p0 = r2, r8\n" \
+ ";;\n" \
+ "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
+ "(p6) br.cond.sptk.many b6\n" \
+ ";;\n"
+
+__DEFINE_FUNC(getreg,
+ __DEFINE_GET_REG(PSR, PSR)
+#ifdef CONFIG_IA32_SUPPORT
+ __DEFINE_GET_REG(AR_EFLAG, EFLAG)
+#endif
+
+ /* get_itc */
+ "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
+ ";;\n"
+ "cmp.eq p6, p0 = r2, r8\n"
+ ";;\n"
+ "(p6) br.cond.spnt xen_get_itc\n"
+ ";;\n"
+
+ /* get itm */
+ "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
+ ";;\n"
+ "cmp.eq p6, p0 = r2, r8\n"
+ ";;\n"
+ "(p6) br.cond.spnt xen_get_itm_with_offset\n"
+ ";;\n"
+
+ __DEFINE_GET_REG(CR_IVR, IVR)
+ __DEFINE_GET_REG(CR_TPR, TPR)
+
+ /* fall back */
+ "movl r2 = ia64_native_getreg_func\n"
+ ";;\n"
+ "mov b7 = r2\n"
+ ";;\n"
+ "br.cond.sptk.many b7\n");
+
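In C terms, the getreg stub above behaves roughly as follows (a hedged
sketch: hyperprivop_get() and native_getreg_fallback() are notional stand-ins
for the break instructions and for the tail call through
ia64_native_getreg_func; the AR_EFLAG case under CONFIG_IA32_SUPPORT is
omitted):

	static unsigned long sketch_xen_getreg(int regnum)  /* regnum in r8 */
	{
		switch (regnum) {
		case _IA64_REG_PSR:
			return hyperprivop_get(HYPERPRIVOP_GET_PSR);
		case _IA64_REG_AR_ITC:
			return xen_get_itc();	/* branch to the full stub */
		case _IA64_REG_CR_ITM:
			return xen_get_itm_with_offset();
		case _IA64_REG_CR_IVR:
			return hyperprivop_get(HYPERPRIVOP_GET_IVR);
		case _IA64_REG_CR_TPR:
			return hyperprivop_get(HYPERPRIVOP_GET_TPR);
		default:
			return native_getreg_fallback(regnum);
		}
	}
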
+extern void xen_setreg(int regnum, unsigned long val);
+#define __DEFINE_SET_REG(id, privop) \
+ "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
+ ";;\n" \
+ "cmp.eq p6, p0 = r2, r9\n" \
+ ";;\n" \
+ "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
+ "(p6) br.cond.sptk.many b6\n" \
+ ";;\n"
+
+__DEFINE_FUNC(setreg,
+ /* kr0 .. kr7 */
+ /*
+ * if (_IA64_REG_AR_KR0 <= regnum &&
+ * regnum <= _IA64_REG_AR_KR7) {
+ * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
+ * register __val asm ("r9") = val
+ * "break HYPERPRIVOP_SET_KR"
+ * }
+ */
+ "mov r17 = r9\n"
+ "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
+ ";;\n"
+ "cmp.ge p6, p0 = r9, r2\n"
+ "sub r17 = r17, r2\n"
+ ";;\n"
+ "(p6) cmp.ge.unc p7, p0 = "
+ __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
+ ", r17\n"
+ ";;\n"
+ "(p7) mov r9 = r8\n"
+ ";;\n"
+ "(p7) mov r8 = r17\n"
+ "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
+
+ /* set itm */
+ "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
+ ";;\n"
+ "cmp.eq p6, p0 = r2, r8\n"
+ ";;\n"
+ "(p6) br.cond.spnt xen_set_itm_with_offset\n"
+
+ /* set itc */
+ "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
+ ";;\n"
+ "cmp.eq p6, p0 = r2, r8\n"
+ ";;\n"
+ "(p6) br.cond.spnt xen_set_itc\n"
+
+#ifdef CONFIG_IA32_SUPPORT
+ __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
+#endif
+ __DEFINE_SET_REG(CR_TPR, SET_TPR)
+ __DEFINE_SET_REG(CR_EOI, EOI)
+
+ /* fall back */
+ "movl r2 = ia64_native_setreg_func\n"
+ ";;\n"
+ "mov b7 = r2\n"
+ ";;\n"
+ "br.cond.sptk.many b7\n");
+#endif
+
+static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.fc = xen_fc,
.thash = xen_thash,
.get_cpuid = xen_get_cpuid,
@@ -337,7 +880,7 @@ xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}
-static const struct pv_iosapic_ops xen_iosapic_ops __initconst = {
+static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
.pcat_compat_init = xen_pcat_compat_init,
.__get_irq_chip = xen_iosapic_get_irq_chip,
@@ -355,6 +898,8 @@ xen_setup_pv_ops(void)
xen_info_init();
pv_info = xen_info;
pv_init_ops = xen_init_ops;
+ pv_fsys_data = xen_fsys_data;
+ pv_patchdata = xen_patchdata;
pv_cpu_ops = xen_cpu_ops;
pv_iosapic_ops = xen_iosapic_ops;
pv_irq_ops = xen_irq_ops;
@@ -362,3 +907,252 @@ xen_setup_pv_ops(void)
paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}
+
+#ifdef ASM_SUPPORTED
+/***************************************************************************
+ * binary patching
+ * pv_init_ops.patch_bundle
+ */
+
+#define DEFINE_FUNC_GETREG(name, privop) \
+ DEFINE_FUNC0(get_ ## name, \
+ "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
+
+DEFINE_FUNC_GETREG(psr, PSR);
+DEFINE_FUNC_GETREG(eflag, EFLAG);
+DEFINE_FUNC_GETREG(ivr, IVR);
+DEFINE_FUNC_GETREG(tpr, TPR);
+
+#define DEFINE_FUNC_SET_KR(n) \
+ DEFINE_VOID_FUNC0(set_kr ## n, \
+ ";;\n" \
+ "mov r9 = r8\n" \
+ "mov r8 = " #n "\n" \
+ "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
+
+DEFINE_FUNC_SET_KR(0);
+DEFINE_FUNC_SET_KR(1);
+DEFINE_FUNC_SET_KR(2);
+DEFINE_FUNC_SET_KR(3);
+DEFINE_FUNC_SET_KR(4);
+DEFINE_FUNC_SET_KR(5);
+DEFINE_FUNC_SET_KR(6);
+DEFINE_FUNC_SET_KR(7);
+
+#define __DEFINE_FUNC_SETREG(name, privop) \
+ DEFINE_VOID_FUNC0(name, \
+ "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
+
+#define DEFINE_FUNC_SETREG(name, privop) \
+ __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
+
+DEFINE_FUNC_SETREG(eflag, EFLAG);
+DEFINE_FUNC_SETREG(tpr, TPR);
+__DEFINE_FUNC_SETREG(eoi, EOI);
+
+extern const char xen_check_events[];
+extern const char __xen_intrin_local_irq_restore_direct_start[];
+extern const char __xen_intrin_local_irq_restore_direct_end[];
+extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
+
+asm (
+ ".align 32\n"
+ ".proc xen_check_events\n"
+ "xen_check_events:\n"
+ /* masked = 0
+ * r9 = masked_addr - 1
+ * = pending_intr_addr
+ */
+ "st1.rel [r9] = r0, -1\n"
+ ";;\n"
+ /* r8 = pending_intr */
+ "ld1.acq r11 = [r9]\n"
+ ";;\n"
+ /* p9 = interrupt pending? */
+ "cmp.ne p9, p10 = r11, r0\n"
+ ";;\n"
+ "(p10) mf\n"
+ /* issue hypercall to trigger interrupt */
+ "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
+ "br.cond.sptk.many b6\n"
+ ".endp xen_check_events\n"
+ "\n"
+ ".align 32\n"
+ ".proc __xen_intrin_local_irq_restore_direct\n"
+ "__xen_intrin_local_irq_restore_direct:\n"
+ "__xen_intrin_local_irq_restore_direct_start:\n"
+ "1:\n"
+ "{\n"
+ "cmp.ne p6, p7 = r8, r0\n"
+ "mov r17 = ip\n" /* get ip to calc return address */
+ "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
+ ";;\n"
+ "}\n"
+ "{\n"
+ /* r9 = XEN_PSR_I_ADDR */
+ "ld8 r9 = [r9]\n"
+ ";;\n"
+ /* r10 = masked previous value */
+ "(p6) ld1.acq r10 = [r9]\n"
+ "adds r17 = 1f - 1b, r17\n" /* calculate return address */
+ ";;\n"
+ "}\n"
+ "{\n"
+ /* p8 = masked: was the interrupt masked previously? */
+ "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
+ "\n"
+ /* p7 = else clause */
+ "(p7) mov r11 = 1\n"
+ ";;\n"
+ "(p8) mov b6 = r17\n" /* set return address */
+ "}\n"
+ "{\n"
+ /* masked = 1 */
+ "(p7) st1.rel [r9] = r11\n"
+ "\n"
+ "[99:]\n"
+ "(p8) brl.cond.dptk.few xen_check_events\n"
+ "}\n"
+ /* the pv calling stub is 5 bundles; fill with nops to adjust the return address */
+ "{\n"
+ "nop 0\n"
+ "nop 0\n"
+ "nop 0\n"
+ "}\n"
+ "1:\n"
+ "__xen_intrin_local_irq_restore_direct_end:\n"
+ ".endp __xen_intrin_local_irq_restore_direct\n"
+ "\n"
+ ".align 8\n"
+ "__xen_intrin_local_irq_restore_direct_reloc:\n"
+ "data8 99b\n"
+);
+
+static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
+__initdata_or_module =
+{
+#define XEN_PATCH_BUNDLE_ELEM(name, type) \
+ { \
+ (void*)xen_ ## name ## _direct_start, \
+ (void*)xen_ ## name ## _direct_end, \
+ PARAVIRT_PATCH_TYPE_ ## type, \
+ }
+
+ XEN_PATCH_BUNDLE_ELEM(fc, FC),
+ XEN_PATCH_BUNDLE_ELEM(thash, THASH),
+ XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
+ XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
+ XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
+ XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
+ XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
+ XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
+ XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
+ XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
+ XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
+ {
+ (void*)__xen_intrin_local_irq_restore_direct_start,
+ (void*)__xen_intrin_local_irq_restore_direct_end,
+ PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
+ },
+
+#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
+ { \
+ xen_get_ ## name ## _direct_start, \
+ xen_get_ ## name ## _direct_end, \
+ PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
+ }
+
+ XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
+ XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
+
+ XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
+ XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
+
+ XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
+ XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
+
+
+#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
+ { \
+ xen_ ## name ## _direct_start, \
+ xen_ ## name ## _direct_end, \
+ PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
+ }
+
+#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
+ __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
+
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
+
+ XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
+ __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
+
+ XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
+ XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
+};
+
+static unsigned long __init_or_module
+xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
+{
+ const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
+ sizeof(xen_patch_bundle_elems[0]);
+ unsigned long used;
+ const struct paravirt_patch_bundle_elem *found;
+
+ used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
+ xen_patch_bundle_elems, nelems,
+ &found);
+
+ if (found == NULL)
+ /* fallback */
+ return ia64_native_patch_bundle(sbundle, ebundle, type);
+ if (used == 0)
+ return used;
+
+ /* relocation */
+ switch (type) {
+ case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
+ unsigned long reloc =
+ __xen_intrin_local_irq_restore_direct_reloc;
+ unsigned long reloc_offset = reloc - (unsigned long)
+ __xen_intrin_local_irq_restore_direct_start;
+ unsigned long tag = (unsigned long)sbundle + reloc_offset;
+ paravirt_patch_reloc_brl(tag, xen_check_events);
+ break;
+ }
+ default:
+ /* nothing */
+ break;
+ }
+ return used;
+}
+#endif /* ASM_SUPPORTED */
+
+const struct paravirt_patch_branch_target xen_branch_target[]
+__initconst = {
+#define PARAVIRT_BR_TARGET(name, type) \
+ { \
+ &xen_ ## name, \
+ PARAVIRT_PATCH_TYPE_BR_ ## type, \
+ }
+ PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
+ PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
+ PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
+ PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
+};
+
+static void __init
+xen_patch_branch(unsigned long tag, unsigned long type)
+{
+ const unsigned long nelem =
+ sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
+ __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
+}