Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                 |  7
-rw-r--r--  arch/ia64/hp/sim/simeth.c         |  2
-rw-r--r--  arch/ia64/include/asm/agp.h       |  4
-rw-r--r--  arch/ia64/include/asm/device.h    |  3
-rw-r--r--  arch/ia64/include/asm/kvm_host.h  |  4
-rw-r--r--  arch/ia64/include/asm/kvm_para.h  |  4
-rw-r--r--  arch/ia64/include/asm/pci.h       | 14
-rw-r--r--  arch/ia64/include/asm/socket.h    |  3
-rw-r--r--  arch/ia64/include/asm/topology.h  | 17
-rw-r--r--  arch/ia64/kernel/process.c        |  7
-rw-r--r--  arch/ia64/kernel/setup.c          |  6
-rw-r--r--  arch/ia64/kernel/smp.c            |  3
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S    | 16
-rw-r--r--  arch/ia64/kvm/Kconfig             | 11
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c          | 85
-rw-r--r--  arch/ia64/kvm/vcpu.c              |  4
-rw-r--r--  arch/ia64/sn/kernel/setup.c       |  2
17 files changed, 98 insertions, 94 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 170042b420d..011a1cdf0eb 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -89,6 +89,9 @@ config GENERIC_TIME_VSYSCALL
bool
default y
+config HAVE_LEGACY_PER_CPU_AREA
+ def_bool y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool y
@@ -112,6 +115,10 @@ config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR
+config ARCH_USES_PG_UNCACHED
+ def_bool y
+ depends on IA64_UNCACHED_ALLOCATOR
+
config AUDIT_ARCH
bool
default y
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index e4d8fde6810..7e81966ce48 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -412,7 +412,7 @@ simeth_tx(struct sk_buff *skb, struct net_device *dev)
*/
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static inline struct sk_buff *
diff --git a/arch/ia64/include/asm/agp.h b/arch/ia64/include/asm/agp.h
index c11fdd8ab4d..01d09c401c5 100644
--- a/arch/ia64/include/asm/agp.h
+++ b/arch/ia64/include/asm/agp.h
@@ -17,10 +17,6 @@
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 41ab85d66f3..d66d446b127 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -15,4 +15,7 @@ struct dev_archdata {
#endif
};
+struct pdev_archdata {
+};
+
#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 5f43697aed3..d9b6325a932 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -235,7 +235,8 @@ struct kvm_vm_data {
#define KVM_REQ_PTC_G 32
#define KVM_REQ_RESUME 33
-#define KVM_PAGES_PER_HPAGE 1
+#define KVM_NR_PAGE_SIZES 1
+#define KVM_PAGES_PER_HPAGE(x) 1
struct kvm;
struct kvm_vcpu;
@@ -465,7 +466,6 @@ struct kvm_arch {
unsigned long metaphysical_rr4;
unsigned long vmm_init_rr;
- int online_vcpus;
int is_sn2;
struct kvm_ioapic *vioapic;
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 0d6d8ca07b8..1588aee781a 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -19,9 +19,13 @@
*
*/
+#ifdef __KERNEL__
+
static inline unsigned int kvm_arch_para_features(void)
{
return 0;
}
#endif
+
+#endif
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index fcfca56bb85..55281aabe5f 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -17,7 +17,6 @@
* loader.
*/
#define pcibios_assign_all_busses() 0
-#define pcibios_scan_all_fns(a, b) 0
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -135,7 +134,18 @@ extern void pcibios_resource_to_bus(struct pci_dev *dev,
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res, struct pci_bus_region *region);
-#define pcibios_scan_all_fns(a, b) 0
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 745421225ec..0b0d5ff062e 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -66,4 +66,7 @@
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
+#define SO_PROTOCOL 38
+#define SO_DOMAIN 39
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 7b4c8c70b2d..d0141fbf51d 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -61,12 +61,13 @@ void build_cpu_to_node_map(void);
.cache_nice_tries = 2, \
.busy_idx = 2, \
.idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
+ | SD_BALANCE_FORK \
| SD_WAKE_AFFINE, \
.last_balance = jiffies, \
.balance_interval = 1, \
@@ -85,14 +86,14 @@ void build_cpu_to_node_map(void);
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
.flags = SD_LOAD_BALANCE \
+ | SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
| SD_BALANCE_FORK \
- | SD_SERIALIZE \
- | SD_WAKE_BALANCE, \
+ | SD_SERIALIZE, \
.last_balance = jiffies, \
.balance_interval = 64, \
.nr_balance_failed = 0, \
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 89969e95004..9bcec9945c1 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -161,6 +161,13 @@ show_regs (struct pt_regs *regs)
show_stack(NULL, NULL);
}
+/* local support for deprecated console_print */
+void
+console_print(const char *s)
+{
+ printk(KERN_EMERG "%s", s);
+}
+
void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1b23ec126b6..1de86c96801 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -855,11 +855,17 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
+/*
+ * In UP configuration, setup_per_cpu_areas() is defined in
+ * include/linux/percpu.h
+ */
+#ifdef CONFIG_SMP
void __init
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
}
+#endif
/*
* Do the following calculations:
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f0c521b0ba4..93ebfea43c6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -58,7 +58,8 @@ static struct local_tlb_flush_counts {
unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
-static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
+ shadow_flush_counts);
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 4a95e86b9ac..eb4214d1c5a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -24,14 +24,14 @@ PHDRS {
}
SECTIONS
{
- /* Sections to be discarded */
+ /* unwind exit sections must be discarded before the rest of the
+ sections get included. */
/DISCARD/ : {
- EXIT_TEXT
- EXIT_DATA
- *(.exitcall.exit)
*(.IA_64.unwind.exit.text)
*(.IA_64.unwind_info.exit.text)
- }
+ *(.comment)
+ *(.note)
+ }
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
phys_start = _start - LOAD_OFFSET;
@@ -316,7 +316,7 @@ SECTIONS
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
- /* These must appear regardless of . */
- /DISCARD/ : { *(.comment) }
- /DISCARD/ : { *(.note) }
+
+ /* Default discards */
+ DISCARDS
}
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 64d52093787..ef3e7be29ca 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -1,12 +1,8 @@
#
# KVM configuration
#
-config HAVE_KVM
- bool
-config HAVE_KVM_IRQCHIP
- bool
- default y
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -28,6 +24,8 @@ config KVM
depends on PCI
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_IRQCHIP
+ select KVM_APIC_ARCHITECTURE
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -49,9 +47,6 @@ config KVM_INTEL
Provides support for KVM on Itanium 2 processors equipped with the VT
extensions.
-config KVM_TRACE
- bool
-
source drivers/virtio/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 80c57b0a21c..0ad09f05efa 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -210,16 +210,6 @@ int kvm_dev_ioctl_check_extension(long ext)
}
-static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
- gpa_t addr, int len, int is_write)
-{
- struct kvm_io_device *dev;
-
- dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
-
- return dev;
-}
-
static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -231,6 +221,7 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct kvm_mmio_req *p;
struct kvm_io_device *mmio_dev;
+ int r;
p = kvm_get_vcpu_ioreq(vcpu);
@@ -247,16 +238,13 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->exit_reason = KVM_EXIT_MMIO;
return 0;
mmio:
- mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr, p->size, !p->dir);
- if (mmio_dev) {
- if (!p->dir)
- kvm_iodevice_write(mmio_dev, p->addr, p->size,
- &p->data);
- else
- kvm_iodevice_read(mmio_dev, p->addr, p->size,
- &p->data);
-
- } else
+ if (p->dir)
+ r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+ p->size, &p->data);
+ else
+ r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+ p->size, &p->data);
+ if (r)
printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
p->state = STATE_IORESP_READY;
@@ -337,13 +325,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
{
union ia64_lid lid;
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (kvm->vcpus[i]) {
- lid.val = VCPU_LID(kvm->vcpus[i]);
- if (lid.id == id && lid.eid == eid)
- return kvm->vcpus[i];
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ lid.val = VCPU_LID(vcpu);
+ if (lid.id == id && lid.eid == eid)
+ return vcpu;
}
return NULL;
@@ -409,21 +396,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+ struct kvm_vcpu *vcpui;
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
- KVM_MP_STATE_UNINITIALIZED ||
- vcpu == kvm->vcpus[i])
+ kvm_for_each_vcpu(i, vcpui, kvm) {
+ if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == vcpui)
continue;
- if (waitqueue_active(&kvm->vcpus[i]->wq))
- wake_up_interruptible(&kvm->vcpus[i]->wq);
+ if (waitqueue_active(&vcpui->wq))
+ wake_up_interruptible(&vcpui->wq);
- if (kvm->vcpus[i]->cpu != -1) {
- call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (vcpui->cpu != -1) {
+ call_data.vcpu = vcpui;
+ smp_call_function_single(vcpui->cpu,
vcpu_global_purge, &call_data, 1);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
@@ -852,8 +839,6 @@ struct kvm *kvm_arch_create_vm(void)
kvm_init_vm(kvm);
- kvm->arch.online_vcpus = 0;
-
return kvm;
}
@@ -1000,10 +985,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
if (irqchip_in_kernel(kvm)) {
__s32 status;
- mutex_lock(&kvm->lock);
+ mutex_lock(&kvm->irq_lock);
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
- mutex_unlock(&kvm->lock);
+ mutex_unlock(&kvm->irq_lock);
if (ioctl == KVM_IRQ_LINE_STATUS) {
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
@@ -1216,7 +1201,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
if (IS_ERR(vmm_vcpu))
return PTR_ERR(vmm_vcpu);
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
/*Set entry address for first run.*/
@@ -1224,7 +1209,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Initialize itc offset for vcpus*/
itc_offset = 0UL - kvm_get_itc(vcpu);
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
+ for (i = 0; i < KVM_MAX_VCPUS; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset;
@@ -1356,8 +1341,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail;
}
- kvm->arch.online_vcpus++;
-
return vcpu;
fail:
return ERR_PTR(r);
@@ -1952,19 +1935,6 @@ int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
return find_highest_bits((int *)&vpd->irr[0]);
}
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
-{
- if (kvm_highest_pending_irq(vcpu) != -1)
- return 1;
- return 0;
-}
-
-int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
-{
- /* do real check here */
- return 1;
-}
-
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.timer_fired;
@@ -1977,7 +1947,8 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
+ return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
+ (kvm_highest_pending_irq(vcpu) != -1);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index cc406d064a0..dce75b70cdd 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -830,8 +830,8 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
kvm = (struct kvm *)KVM_VM_BASE;
- if (vcpu->vcpu_id == 0) {
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
VMX(v, itc_offset) = itc_offset;
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index e456f062f24..ece1bf99449 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
+DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);