Diffstat (limited to 'arch')
362 files changed, 19891 insertions, 3396 deletions
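Most of the arch/alpha churn below is a mechanical rename of struct hw_interrupt_type to struct irq_chip; the initializers themselves are unchanged. As an illustration only (not part of the patch — the platform name and handlers are hypothetical, and only fields visible in the Alpha hunks are shown), a per-platform definition after the rename has this shape:

	/* Sketch of an Alpha-style irq_chip after the rename; the handler
	 * names are made up, the field names mirror the hunks below. */
	#include <linux/irq.h>

	static unsigned int example_startup_irq(unsigned int irq) { return 0; }
	static void example_disable_irq(unsigned int irq) { }
	static void example_end_irq(unsigned int irq) { }

	static struct irq_chip example_irq_type = {
		.typename	= "EXAMPLE",
		.startup	= example_startup_irq,
		.shutdown	= example_disable_irq,
		.end		= example_end_irq,
	};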
diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h index fef5c1450e4..a71c9c1455a 100644 --- a/arch/alpha/include/asm/8253pit.h +++ b/arch/alpha/include/asm/8253pit.h @@ -1,10 +1,3 @@ /* * 8253/8254 Programmable Interval Timer */ - -#ifndef _8253PIT_H -#define _8253PIT_H - -#define PIT_TICK_RATE 1193180UL - -#endif diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h index 69e2655249d..98099bda937 100644 --- a/arch/alpha/include/asm/errno.h +++ b/arch/alpha/include/asm/errno.h @@ -120,4 +120,6 @@ #define EOWNERDEAD 136 /* Owner died */ #define ENOTRECOVERABLE 137 /* State not recoverable */ +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + #endif diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h index 3e6735a34c5..a8d4ec8ea4b 100644 --- a/arch/alpha/include/asm/kmap_types.h +++ b/arch/alpha/include/asm/kmap_types.h @@ -3,30 +3,12 @@ /* Dummy header just to define km_type. */ - #ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c index c2938e574a4..19b86328ffd 100644 --- a/arch/alpha/kernel/init_task.c +++ b/arch/alpha/kernel/init_task.c @@ -10,10 +10,7 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); struct task_struct init_task = INIT_TASK(init_task); - -EXPORT_SYMBOL(init_mm); EXPORT_SYMBOL(init_task); union thread_union init_thread_union diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 67c19f8a994..38c805dfc54 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c @@ -227,7 +227,7 @@ struct irqaction timer_irqaction = { .name = "timer", }; -static struct hw_interrupt_type rtc_irq_type = { +static struct irq_chip rtc_irq_type = { .typename = "RTC", .startup = rtc_startup, .shutdown = rtc_enable_disable, diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c index 9405bee9894..50bfec9b588 100644 --- a/arch/alpha/kernel/irq_i8259.c +++ b/arch/alpha/kernel/irq_i8259.c @@ -83,7 +83,7 @@ i8259a_end_irq(unsigned int irq) i8259a_enable_irq(irq); } -struct hw_interrupt_type i8259a_irq_type = { +struct irq_chip i8259a_irq_type = { .typename = "XT-PIC", .startup = i8259a_startup_irq, .shutdown = i8259a_disable_irq, diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h index cc9a8a7aa27..b63ccd7386f 100644 --- a/arch/alpha/kernel/irq_impl.h +++ b/arch/alpha/kernel/irq_impl.h @@ -36,7 +36,7 @@ extern void i8259a_disable_irq(unsigned int); extern void i8259a_mask_and_ack_irq(unsigned int); extern unsigned int i8259a_startup_irq(unsigned int); extern void i8259a_end_irq(unsigned int); -extern struct hw_interrupt_type i8259a_irq_type; +extern struct irq_chip i8259a_irq_type; extern void init_i8259a_irqs(void); extern void handle_irq(int irq); diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c index d53edbccbfe..69199a76ec4 100644 --- 
a/arch/alpha/kernel/irq_pyxis.c +++ b/arch/alpha/kernel/irq_pyxis.c @@ -70,7 +70,7 @@ pyxis_mask_and_ack_irq(unsigned int irq) *(vulp)PYXIS_INT_MASK; } -static struct hw_interrupt_type pyxis_irq_type = { +static struct irq_chip pyxis_irq_type = { .typename = "PYXIS", .startup = pyxis_startup_irq, .shutdown = pyxis_disable_irq, diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c index a03fbca4940..85229369a1f 100644 --- a/arch/alpha/kernel/irq_srm.c +++ b/arch/alpha/kernel/irq_srm.c @@ -48,7 +48,7 @@ srm_end_irq(unsigned int irq) } /* Handle interrupts from the SRM, assuming no additional weirdness. */ -static struct hw_interrupt_type srm_irq_type = { +static struct irq_chip srm_irq_type = { .typename = "SRM", .startup = srm_startup_irq, .shutdown = srm_disable_irq, diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 80df86cd746..d2634e4476b 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -252,9 +252,9 @@ reserve_std_resources(void) } #define PFN_MAX PFN_DOWN(0x80000000) -#define for_each_mem_cluster(memdesc, cluster, i) \ - for ((cluster) = (memdesc)->cluster, (i) = 0; \ - (i) < (memdesc)->numclusters; (i)++, (cluster)++) +#define for_each_mem_cluster(memdesc, _cluster, i) \ + for ((_cluster) = (memdesc)->cluster, (i) = 0; \ + (i) < (memdesc)->numclusters; (i)++, (_cluster)++) static unsigned long __init get_mem_size_limit(char *s) diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index e53a1e1c2f2..382035ef739 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c @@ -89,7 +89,7 @@ alcor_end_irq(unsigned int irq) alcor_enable_irq(irq); } -static struct hw_interrupt_type alcor_irq_type = { +static struct irq_chip alcor_irq_type = { .typename = "ALCOR", .startup = alcor_startup_irq, .shutdown = alcor_disable_irq, diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index ace475c124f..ed349436732 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -71,7 +71,7 @@ cabriolet_end_irq(unsigned int irq) cabriolet_enable_irq(irq); } -static struct hw_interrupt_type cabriolet_irq_type = { +static struct irq_chip cabriolet_irq_type = { .typename = "CABRIOLET", .startup = cabriolet_startup_irq, .shutdown = cabriolet_disable_irq, diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index 5bd5259324b..46e70ece517 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c @@ -198,7 +198,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) return 0; } -static struct hw_interrupt_type dp264_irq_type = { +static struct irq_chip dp264_irq_type = { .typename = "DP264", .startup = dp264_startup_irq, .shutdown = dp264_disable_irq, @@ -209,7 +209,7 @@ static struct hw_interrupt_type dp264_irq_type = { .set_affinity = dp264_set_affinity, }; -static struct hw_interrupt_type clipper_irq_type = { +static struct irq_chip clipper_irq_type = { .typename = "CLIPPER", .startup = clipper_startup_irq, .shutdown = clipper_disable_irq, @@ -298,7 +298,7 @@ clipper_srm_device_interrupt(unsigned long vector) } static void __init -init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax) +init_tsunami_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index 9c5a306dc0e..660c23ef661 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c 
@@ -69,7 +69,7 @@ eb64p_end_irq(unsigned int irq) eb64p_enable_irq(irq); } -static struct hw_interrupt_type eb64p_irq_type = { +static struct irq_chip eb64p_irq_type = { .typename = "EB64P", .startup = eb64p_startup_irq, .shutdown = eb64p_disable_irq, diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index baf60f36cbd..b99ea488d84 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c @@ -80,7 +80,7 @@ eiger_end_irq(unsigned int irq) eiger_enable_irq(irq); } -static struct hw_interrupt_type eiger_irq_type = { +static struct irq_chip eiger_irq_type = { .typename = "EIGER", .startup = eiger_startup_irq, .shutdown = eiger_disable_irq, diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c index 2b5caf3d9b1..ef0b83a070a 100644 --- a/arch/alpha/kernel/sys_jensen.c +++ b/arch/alpha/kernel/sys_jensen.c @@ -118,7 +118,7 @@ jensen_local_end(unsigned int irq) i8259a_end_irq(1); } -static struct hw_interrupt_type jensen_local_irq_type = { +static struct irq_chip jensen_local_irq_type = { .typename = "LOCAL", .startup = jensen_local_startup, .shutdown = jensen_local_shutdown, diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index c5a1a2438c6..bbfc4f20ca7 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -169,7 +169,7 @@ marvel_irq_noop_return(unsigned int irq) return 0; } -static struct hw_interrupt_type marvel_legacy_irq_type = { +static struct irq_chip marvel_legacy_irq_type = { .typename = "LEGACY", .startup = marvel_irq_noop_return, .shutdown = marvel_irq_noop, @@ -179,7 +179,7 @@ static struct hw_interrupt_type marvel_legacy_irq_type = { .end = marvel_irq_noop, }; -static struct hw_interrupt_type io7_lsi_irq_type = { +static struct irq_chip io7_lsi_irq_type = { .typename = "LSI", .startup = io7_startup_irq, .shutdown = io7_disable_irq, @@ -189,7 +189,7 @@ static struct hw_interrupt_type io7_lsi_irq_type = { .end = io7_end_irq, }; -static struct hw_interrupt_type io7_msi_irq_type = { +static struct irq_chip io7_msi_irq_type = { .typename = "MSI", .startup = io7_startup_irq, .shutdown = io7_disable_irq, @@ -273,8 +273,8 @@ init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where) static void __init init_io7_irqs(struct io7 *io7, - struct hw_interrupt_type *lsi_ops, - struct hw_interrupt_type *msi_ops) + struct irq_chip *lsi_ops, + struct irq_chip *msi_ops) { long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16; long i; diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index 8d3e9429c5e..4e366641a08 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c @@ -68,7 +68,7 @@ mikasa_end_irq(unsigned int irq) mikasa_enable_irq(irq); } -static struct hw_interrupt_type mikasa_irq_type = { +static struct irq_chip mikasa_irq_type = { .typename = "MIKASA", .startup = mikasa_startup_irq, .shutdown = mikasa_disable_irq, diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index 538876b6244..35753a173ba 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c @@ -73,7 +73,7 @@ noritake_end_irq(unsigned int irq) noritake_enable_irq(irq); } -static struct hw_interrupt_type noritake_irq_type = { +static struct irq_chip noritake_irq_type = { .typename = "NORITAKE", .startup = noritake_startup_irq, .shutdown = noritake_disable_irq, diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index 672cb2df53d..f3aec7e085c 100644 --- 
a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c @@ -135,7 +135,7 @@ rawhide_end_irq(unsigned int irq) rawhide_enable_irq(irq); } -static struct hw_interrupt_type rawhide_irq_type = { +static struct irq_chip rawhide_irq_type = { .typename = "RAWHIDE", .startup = rawhide_startup_irq, .shutdown = rawhide_disable_irq, diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index f15a329b601..d9f9cfeb993 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/pci.h> #include <linux/ioport.h> +#include <linux/timex.h> #include <linux/init.h> #include <asm/ptrace.h> diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index ce1faa6f1df..fc924637345 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c @@ -72,7 +72,7 @@ rx164_end_irq(unsigned int irq) rx164_enable_irq(irq); } -static struct hw_interrupt_type rx164_irq_type = { +static struct irq_chip rx164_irq_type = { .typename = "RX164", .startup = rx164_startup_irq, .shutdown = rx164_disable_irq, diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index 9e263256a42..426eb6906d0 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c @@ -501,7 +501,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq) spin_unlock(&sable_lynx_irq_lock); } -static struct hw_interrupt_type sable_lynx_irq_type = { +static struct irq_chip sable_lynx_irq_type = { .typename = "SABLE/LYNX", .startup = sable_lynx_startup_irq, .shutdown = sable_lynx_disable_irq, diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 9bd9a31450c..830318c2166 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c @@ -74,7 +74,7 @@ takara_end_irq(unsigned int irq) takara_enable_irq(irq); } -static struct hw_interrupt_type takara_irq_type = { +static struct irq_chip takara_irq_type = { .typename = "TAKARA", .startup = takara_startup_irq, .shutdown = takara_disable_irq, diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 8dd239ebdb9..88978fc60f8 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -185,7 +185,7 @@ titan_srm_device_interrupt(unsigned long vector) static void __init -init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax) +init_titan_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { @@ -194,7 +194,7 @@ init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax) } } -static struct hw_interrupt_type titan_irq_type = { +static struct irq_chip titan_irq_type = { .typename = "TITAN", .startup = titan_startup_irq, .shutdown = titan_disable_irq, diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index 42c3eede4d0..e91b4c3838a 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -157,7 +157,7 @@ wildfire_end_irq(unsigned int irq) wildfire_enable_irq(irq); } -static struct hw_interrupt_type wildfire_irq_type = { +static struct irq_chip wildfire_irq_type = { .typename = "WILDFIRE", .startup = wildfire_startup_irq, .shutdown = wildfire_disable_irq, diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index a13de49d126..0eab5574942 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -28,9 +28,9 @@ EXPORT_SYMBOL(node_data); #define DBGDCONT(args...) 
#endif -#define for_each_mem_cluster(memdesc, cluster, i) \ - for ((cluster) = (memdesc)->cluster, (i) = 0; \ - (i) < (memdesc)->numclusters; (i)++, (cluster)++) +#define for_each_mem_cluster(memdesc, _cluster, i) \ + for ((_cluster) = (memdesc)->cluster, (i) = 0; \ + (i) < (memdesc)->numclusters; (i)++, (_cluster)++) static void __init show_mem_layout(void) { diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c index e859af34946..3f470866bb8 100644 --- a/arch/arm/kernel/init_task.c +++ b/arch/arm/kernel/init_task.c @@ -14,10 +14,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. * diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 442b87476f9..93bb4247b7e 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -536,7 +536,7 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, return err; } -static inline void restart_syscall(struct pt_regs *regs) +static inline void setup_syscall_restart(struct pt_regs *regs) { regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; @@ -571,7 +571,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, } /* fallthrough */ case -ERESTARTNOINTR: - restart_syscall(regs); + setup_syscall_restart(regs); } } @@ -695,7 +695,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall) if (regs->ARM_r0 == -ERESTARTNOHAND || regs->ARM_r0 == -ERESTARTSYS || regs->ARM_r0 == -ERESTARTNOINTR) { - restart_syscall(regs); + setup_syscall_restart(regs); } } single_step_set(current); diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 095521e9ee2..01791d74e08 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -380,12 +380,12 @@ static struct pca953x_platform_data pca9536_data = { .gpio_base = NR_BUILTIN_GPIO, }; -static int gpio_bus_switch; +static int gpio_bus_switch = -EINVAL; static int pcm990_camera_set_bus_param(struct soc_camera_link *link, - unsigned long flags) + unsigned long flags) { - if (gpio_bus_switch <= 0) { + if (gpio_bus_switch < 0) { if (flags == SOCAM_DATAWIDTH_10) return 0; else @@ -404,25 +404,34 @@ static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link) { int ret; - if (!gpio_bus_switch) { + if (gpio_bus_switch < 0) { ret = gpio_request(NR_BUILTIN_GPIO, "camera"); if (!ret) { gpio_bus_switch = NR_BUILTIN_GPIO; gpio_direction_output(gpio_bus_switch, 0); - } else - gpio_bus_switch = -EINVAL; + } } - if (gpio_bus_switch > 0) + if (gpio_bus_switch >= 0) return SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_10; else return SOCAM_DATAWIDTH_10; } +static void pcm990_camera_free_bus(struct soc_camera_link *link) +{ + if (gpio_bus_switch < 0) + return; + + gpio_free(gpio_bus_switch); + gpio_bus_switch = -EINVAL; +} + static struct soc_camera_link iclink = { .bus_id = 0, /* Must match with the camera ID above */ .query_bus_param = pcm990_camera_query_bus_param, .set_bus_param = pcm990_camera_set_bus_param, + .free_bus = pcm990_camera_free_bus, }; /* Board I2C devices. 
*/ diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index fb0294bd431..c31e601eb49 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c @@ -35,21 +35,25 @@ static void tosa_bt_off(struct tosa_bt_data *data) gpio_set_value(data->gpio_reset, 0); } -static int tosa_bt_toggle_radio(void *data, enum rfkill_state state) +static int tosa_bt_set_block(void *data, bool blocked) { - pr_info("BT_RADIO going: %s\n", - state == RFKILL_STATE_ON ? "on" : "off"); + pr_info("BT_RADIO going: %s\n", blocked ? "off" : "on"); - if (state == RFKILL_STATE_ON) { + if (!blocked) { pr_info("TOSA_BT: going ON\n"); tosa_bt_on(data); } else { pr_info("TOSA_BT: going OFF\n"); tosa_bt_off(data); } + return 0; } +static const struct rfkill_ops tosa_bt_rfkill_ops = { + .set_block = tosa_bt_set_block, +}; + static int tosa_bt_probe(struct platform_device *dev) { int rc; @@ -70,18 +74,14 @@ static int tosa_bt_probe(struct platform_device *dev) if (rc) goto err_pwr_dir; - rfk = rfkill_allocate(&dev->dev, RFKILL_TYPE_BLUETOOTH); + rfk = rfkill_alloc("tosa-bt", &dev->dev, RFKILL_TYPE_BLUETOOTH, + &tosa_bt_rfkill_ops, data); if (!rfk) { rc = -ENOMEM; goto err_rfk_alloc; } - rfk->name = "tosa-bt"; - rfk->toggle_radio = tosa_bt_toggle_radio; - rfk->data = data; -#ifdef CONFIG_RFKILL_LEDS - rfk->led_trigger.name = "tosa-bt"; -#endif + rfkill_set_led_trigger_name(rfk, "tosa-bt"); rc = rfkill_register(rfk); if (rc) @@ -92,9 +92,7 @@ static int tosa_bt_probe(struct platform_device *dev) return 0; err_rfkill: - if (rfk) - rfkill_free(rfk); - rfk = NULL; + rfkill_destroy(rfk); err_rfk_alloc: tosa_bt_off(data); err_pwr_dir: @@ -113,8 +111,10 @@ static int __devexit tosa_bt_remove(struct platform_device *dev) platform_set_drvdata(dev, NULL); - if (rfk) + if (rfk) { rfkill_unregister(rfk); + rfkill_destroy(rfk); + } rfk = NULL; tosa_bt_off(data); diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index 168267a5dfb..117ad5920e5 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -31,7 +31,6 @@ #include <linux/input.h> #include <linux/gpio.h> #include <linux/pda_power.h> -#include <linux/rfkill.h> #include <linux/spi/spi.h> #include <asm/setup.h> diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h new file mode 100644 index 00000000000..36a85f5000c --- /dev/null +++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h @@ -0,0 +1,50 @@ +/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C - USB2.0 Highspeed/OtG device PHY registers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +/* Note, this is a seperate header file as some of the clock framework + * needs to touch this if the clk_48m is used as the USB OHCI or other + * peripheral source. 
+*/ + +#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H +#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__ + +/* S3C64XX_PA_USB_HSPHY */ + +#define S3C_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY) + +#define S3C_PHYPWR S3C_HSOTG_PHYREG(0x00) +#define SRC_PHYPWR_OTG_DISABLE (1 << 4) +#define SRC_PHYPWR_ANALOG_POWERDOWN (1 << 3) +#define SRC_PHYPWR_FORCE_SUSPEND (1 << 1) + +#define S3C_PHYCLK S3C_HSOTG_PHYREG(0x04) +#define S3C_PHYCLK_MODE_USB11 (1 << 6) +#define S3C_PHYCLK_EXT_OSC (1 << 5) +#define S3C_PHYCLK_CLK_FORCE (1 << 4) +#define S3C_PHYCLK_ID_PULL (1 << 2) +#define S3C_PHYCLK_CLKSEL_MASK (0x3 << 0) +#define S3C_PHYCLK_CLKSEL_SHIFT (0) +#define S3C_PHYCLK_CLKSEL_48M (0x0 << 0) +#define S3C_PHYCLK_CLKSEL_12M (0x2 << 0) +#define S3C_PHYCLK_CLKSEL_24M (0x3 << 0) + +#define S3C_RSTCON S3C_HSOTG_PHYREG(0x08) +#define S3C_RSTCON_PHYCLK (1 << 2) +#define S3C_RSTCON_HCLK (1 << 2) +#define S3C_RSTCON_PHY (1 << 0) + +#define S3C_PHYTUNE S3C_HSOTG_PHYREG(0x20) + +#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */ diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h new file mode 100644 index 00000000000..8d18d9d4d14 --- /dev/null +++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h @@ -0,0 +1,377 @@ +/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C - USB2.0 Highspeed/OtG device block registers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_H +#define __PLAT_S3C64XX_REGS_USB_HSOTG_H __FILE__ + +#define S3C_HSOTG_REG(x) (x) + +#define S3C_GOTGCTL S3C_HSOTG_REG(0x000) +#define S3C_GOTGCTL_BSESVLD (1 << 19) +#define S3C_GOTGCTL_ASESVLD (1 << 18) +#define S3C_GOTGCTL_DBNC_SHORT (1 << 17) +#define S3C_GOTGCTL_CONID_B (1 << 16) +#define S3C_GOTGCTL_DEVHNPEN (1 << 11) +#define S3C_GOTGCTL_HSSETHNPEN (1 << 10) +#define S3C_GOTGCTL_HNPREQ (1 << 9) +#define S3C_GOTGCTL_HSTNEGSCS (1 << 8) +#define S3C_GOTGCTL_SESREQ (1 << 1) +#define S3C_GOTGCTL_SESREQSCS (1 << 0) + +#define S3C_GOTGINT S3C_HSOTG_REG(0x004) +#define S3C_GOTGINT_DbnceDone (1 << 19) +#define S3C_GOTGINT_ADevTOUTChg (1 << 18) +#define S3C_GOTGINT_HstNegDet (1 << 17) +#define S3C_GOTGINT_HstnegSucStsChng (1 << 9) +#define S3C_GOTGINT_SesReqSucStsChng (1 << 8) +#define S3C_GOTGINT_SesEndDet (1 << 2) + +#define S3C_GAHBCFG S3C_HSOTG_REG(0x008) +#define S3C_GAHBCFG_PTxFEmpLvl (1 << 8) +#define S3C_GAHBCFG_NPTxFEmpLvl (1 << 7) +#define S3C_GAHBCFG_DMAEn (1 << 5) +#define S3C_GAHBCFG_HBstLen_MASK (0xf << 1) +#define S3C_GAHBCFG_HBstLen_SHIFT (1) +#define S3C_GAHBCFG_HBstLen_Single (0x0 << 1) +#define S3C_GAHBCFG_HBstLen_Incr (0x1 << 1) +#define S3C_GAHBCFG_HBstLen_Incr4 (0x3 << 1) +#define S3C_GAHBCFG_HBstLen_Incr8 (0x5 << 1) +#define S3C_GAHBCFG_HBstLen_Incr16 (0x7 << 1) +#define S3C_GAHBCFG_GlblIntrEn (1 << 0) + +#define S3C_GUSBCFG S3C_HSOTG_REG(0x00C) +#define S3C_GUSBCFG_PHYLPClkSel (1 << 15) +#define S3C_GUSBCFG_HNPCap (1 << 9) +#define S3C_GUSBCFG_SRPCap (1 << 8) +#define S3C_GUSBCFG_PHYIf16 (1 << 3) +#define S3C_GUSBCFG_TOutCal_MASK (0x7 << 0) +#define S3C_GUSBCFG_TOutCal_SHIFT (0) +#define S3C_GUSBCFG_TOutCal_LIMIT (0x7) +#define S3C_GUSBCFG_TOutCal(_x) ((_x) << 0) + +#define S3C_GRSTCTL S3C_HSOTG_REG(0x010) + +#define S3C_GRSTCTL_AHBIdle (1 << 
31) +#define S3C_GRSTCTL_DMAReq (1 << 30) +#define S3C_GRSTCTL_TxFNum_MASK (0x1f << 6) +#define S3C_GRSTCTL_TxFNum_SHIFT (6) +#define S3C_GRSTCTL_TxFNum_LIMIT (0x1f) +#define S3C_GRSTCTL_TxFNum(_x) ((_x) << 6) +#define S3C_GRSTCTL_TxFFlsh (1 << 5) +#define S3C_GRSTCTL_RxFFlsh (1 << 4) +#define S3C_GRSTCTL_INTknQFlsh (1 << 3) +#define S3C_GRSTCTL_FrmCntrRst (1 << 2) +#define S3C_GRSTCTL_HSftRst (1 << 1) +#define S3C_GRSTCTL_CSftRst (1 << 0) + +#define S3C_GINTSTS S3C_HSOTG_REG(0x014) +#define S3C_GINTMSK S3C_HSOTG_REG(0x018) + +#define S3C_GINTSTS_WkUpInt (1 << 31) +#define S3C_GINTSTS_SessReqInt (1 << 30) +#define S3C_GINTSTS_DisconnInt (1 << 29) +#define S3C_GINTSTS_ConIDStsChng (1 << 28) +#define S3C_GINTSTS_PTxFEmp (1 << 26) +#define S3C_GINTSTS_HChInt (1 << 25) +#define S3C_GINTSTS_PrtInt (1 << 24) +#define S3C_GINTSTS_FetSusp (1 << 22) +#define S3C_GINTSTS_incompIP (1 << 21) +#define S3C_GINTSTS_IncomplSOIN (1 << 20) +#define S3C_GINTSTS_OEPInt (1 << 19) +#define S3C_GINTSTS_IEPInt (1 << 18) +#define S3C_GINTSTS_EPMis (1 << 17) +#define S3C_GINTSTS_EOPF (1 << 15) +#define S3C_GINTSTS_ISOutDrop (1 << 14) +#define S3C_GINTSTS_EnumDone (1 << 13) +#define S3C_GINTSTS_USBRst (1 << 12) +#define S3C_GINTSTS_USBSusp (1 << 11) +#define S3C_GINTSTS_ErlySusp (1 << 10) +#define S3C_GINTSTS_GOUTNakEff (1 << 7) +#define S3C_GINTSTS_GINNakEff (1 << 6) +#define S3C_GINTSTS_NPTxFEmp (1 << 5) +#define S3C_GINTSTS_RxFLvl (1 << 4) +#define S3C_GINTSTS_SOF (1 << 3) +#define S3C_GINTSTS_OTGInt (1 << 2) +#define S3C_GINTSTS_ModeMis (1 << 1) +#define S3C_GINTSTS_CurMod_Host (1 << 0) + +#define S3C_GRXSTSR S3C_HSOTG_REG(0x01C) +#define S3C_GRXSTSP S3C_HSOTG_REG(0x020) + +#define S3C_GRXSTS_FN_MASK (0x7f << 25) +#define S3C_GRXSTS_FN_SHIFT (25) + +#define S3C_GRXSTS_PktSts_MASK (0xf << 17) +#define S3C_GRXSTS_PktSts_SHIFT (17) +#define S3C_GRXSTS_PktSts_GlobalOutNAK (0x1 << 17) +#define S3C_GRXSTS_PktSts_OutRX (0x2 << 17) +#define S3C_GRXSTS_PktSts_OutDone (0x3 << 17) +#define S3C_GRXSTS_PktSts_SetupDone (0x4 << 17) +#define S3C_GRXSTS_PktSts_SetupRX (0x6 << 17) + +#define S3C_GRXSTS_DPID_MASK (0x3 << 15) +#define S3C_GRXSTS_DPID_SHIFT (15) +#define S3C_GRXSTS_ByteCnt_MASK (0x7ff << 4) +#define S3C_GRXSTS_ByteCnt_SHIFT (4) +#define S3C_GRXSTS_EPNum_MASK (0xf << 0) +#define S3C_GRXSTS_EPNum_SHIFT (0) + +#define S3C_GRXFSIZ S3C_HSOTG_REG(0x024) + +#define S3C_GNPTXFSIZ S3C_HSOTG_REG(0x028) + +#define S3C_GNPTXFSIZ_NPTxFDep_MASK (0xffff << 16) +#define S3C_GNPTXFSIZ_NPTxFDep_SHIFT (16) +#define S3C_GNPTXFSIZ_NPTxFDep_LIMIT (0xffff) +#define S3C_GNPTXFSIZ_NPTxFDep(_x) ((_x) << 16) +#define S3C_GNPTXFSIZ_NPTxFStAddr_MASK (0xffff << 0) +#define S3C_GNPTXFSIZ_NPTxFStAddr_SHIFT (0) +#define S3C_GNPTXFSIZ_NPTxFStAddr_LIMIT (0xffff) +#define S3C_GNPTXFSIZ_NPTxFStAddr(_x) ((_x) << 0) + +#define S3C_GNPTXSTS S3C_HSOTG_REG(0x02C) + +#define S3C_GNPTXSTS_NPtxQTop_MASK (0x7f << 24) +#define S3C_GNPTXSTS_NPtxQTop_SHIFT (24) + +#define S3C_GNPTXSTS_NPTxQSpcAvail_MASK (0xff << 16) +#define S3C_GNPTXSTS_NPTxQSpcAvail_SHIFT (16) +#define S3C_GNPTXSTS_NPTxQSpcAvail_GET(_v) (((_v) >> 16) & 0xff) + +#define S3C_GNPTXSTS_NPTxFSpcAvail_MASK (0xffff << 0) +#define S3C_GNPTXSTS_NPTxFSpcAvail_SHIFT (0) +#define S3C_GNPTXSTS_NPTxFSpcAvail_GET(_v) (((_v) >> 0) & 0xffff) + + +#define S3C_HPTXFSIZ S3C_HSOTG_REG(0x100) + +#define S3C_DPTXFSIZn(_a) S3C_HSOTG_REG(0x104 + (((_a) - 1) * 4)) + +#define S3C_DPTXFSIZn_DPTxFSize_MASK (0xffff << 16) +#define S3C_DPTXFSIZn_DPTxFSize_SHIFT (16) +#define S3C_DPTXFSIZn_DPTxFSize_GET(_v) (((_v) >> 16) & 
0xffff) +#define S3C_DPTXFSIZn_DPTxFSize_LIMIT (0xffff) +#define S3C_DPTXFSIZn_DPTxFSize(_x) ((_x) << 16) + +#define S3C_DPTXFSIZn_DPTxFStAddr_MASK (0xffff << 0) +#define S3C_DPTXFSIZn_DPTxFStAddr_SHIFT (0) + +/* Device mode registers */ +#define S3C_DCFG S3C_HSOTG_REG(0x800) + +#define S3C_DCFG_EPMisCnt_MASK (0x1f << 18) +#define S3C_DCFG_EPMisCnt_SHIFT (18) +#define S3C_DCFG_EPMisCnt_LIMIT (0x1f) +#define S3C_DCFG_EPMisCnt(_x) ((_x) << 18) + +#define S3C_DCFG_PerFrInt_MASK (0x3 << 11) +#define S3C_DCFG_PerFrInt_SHIFT (11) +#define S3C_DCFG_PerFrInt_LIMIT (0x3) +#define S3C_DCFG_PerFrInt(_x) ((_x) << 11) + +#define S3C_DCFG_DevAddr_MASK (0x7f << 4) +#define S3C_DCFG_DevAddr_SHIFT (4) +#define S3C_DCFG_DevAddr_LIMIT (0x7f) +#define S3C_DCFG_DevAddr(_x) ((_x) << 4) + +#define S3C_DCFG_NZStsOUTHShk (1 << 2) + +#define S3C_DCFG_DevSpd_MASK (0x3 << 0) +#define S3C_DCFG_DevSpd_SHIFT (0) +#define S3C_DCFG_DevSpd_HS (0x0 << 0) +#define S3C_DCFG_DevSpd_FS (0x1 << 0) +#define S3C_DCFG_DevSpd_LS (0x2 << 0) +#define S3C_DCFG_DevSpd_FS48 (0x3 << 0) + +#define S3C_DCTL S3C_HSOTG_REG(0x804) + +#define S3C_DCTL_PWROnPrgDone (1 << 11) +#define S3C_DCTL_CGOUTNak (1 << 10) +#define S3C_DCTL_SGOUTNak (1 << 9) +#define S3C_DCTL_CGNPInNAK (1 << 8) +#define S3C_DCTL_SGNPInNAK (1 << 7) +#define S3C_DCTL_TstCtl_MASK (0x7 << 4) +#define S3C_DCTL_TstCtl_SHIFT (4) +#define S3C_DCTL_GOUTNakSts (1 << 3) +#define S3C_DCTL_GNPINNakSts (1 << 2) +#define S3C_DCTL_SftDiscon (1 << 1) +#define S3C_DCTL_RmtWkUpSig (1 << 0) + +#define S3C_DSTS S3C_HSOTG_REG(0x808) + +#define S3C_DSTS_SOFFN_MASK (0x3fff << 8) +#define S3C_DSTS_SOFFN_SHIFT (8) +#define S3C_DSTS_SOFFN_LIMIT (0x3fff) +#define S3C_DSTS_SOFFN(_x) ((_x) << 8) +#define S3C_DSTS_ErraticErr (1 << 3) +#define S3C_DSTS_EnumSpd_MASK (0x3 << 1) +#define S3C_DSTS_EnumSpd_SHIFT (1) +#define S3C_DSTS_EnumSpd_HS (0x0 << 1) +#define S3C_DSTS_EnumSpd_FS (0x1 << 1) +#define S3C_DSTS_EnumSpd_LS (0x2 << 1) +#define S3C_DSTS_EnumSpd_FS48 (0x3 << 1) + +#define S3C_DSTS_SuspSts (1 << 0) + +#define S3C_DIEPMSK S3C_HSOTG_REG(0x810) + +#define S3C_DIEPMSK_INEPNakEffMsk (1 << 6) +#define S3C_DIEPMSK_INTknEPMisMsk (1 << 5) +#define S3C_DIEPMSK_INTknTXFEmpMsk (1 << 4) +#define S3C_DIEPMSK_TimeOUTMsk (1 << 3) +#define S3C_DIEPMSK_AHBErrMsk (1 << 2) +#define S3C_DIEPMSK_EPDisbldMsk (1 << 1) +#define S3C_DIEPMSK_XferComplMsk (1 << 0) + +#define S3C_DOEPMSK S3C_HSOTG_REG(0x814) + +#define S3C_DOEPMSK_Back2BackSetup (1 << 6) +#define S3C_DOEPMSK_OUTTknEPdisMsk (1 << 4) +#define S3C_DOEPMSK_SetupMsk (1 << 3) +#define S3C_DOEPMSK_AHBErrMsk (1 << 2) +#define S3C_DOEPMSK_EPDisbldMsk (1 << 1) +#define S3C_DOEPMSK_XferComplMsk (1 << 0) + +#define S3C_DAINT S3C_HSOTG_REG(0x818) +#define S3C_DAINTMSK S3C_HSOTG_REG(0x81C) + +#define S3C_DAINT_OutEP_SHIFT (16) +#define S3C_DAINT_OutEP(x) (1 << ((x) + 16)) +#define S3C_DAINT_InEP(x) (1 << (x)) + +#define S3C_DTKNQR1 S3C_HSOTG_REG(0x820) +#define S3C_DTKNQR2 S3C_HSOTG_REG(0x824) +#define S3C_DTKNQR3 S3C_HSOTG_REG(0x830) +#define S3C_DTKNQR4 S3C_HSOTG_REG(0x834) + +#define S3C_DVBUSDIS S3C_HSOTG_REG(0x828) +#define S3C_DVBUSPULSE S3C_HSOTG_REG(0x82C) + +#define S3C_DIEPCTL0 S3C_HSOTG_REG(0x900) +#define S3C_DOEPCTL0 S3C_HSOTG_REG(0xB00) +#define S3C_DIEPCTL(_a) S3C_HSOTG_REG(0x900 + ((_a) * 0x20)) +#define S3C_DOEPCTL(_a) S3C_HSOTG_REG(0xB00 + ((_a) * 0x20)) + +/* EP0 specialness: + * bits[29..28] - reserved (no SetD0PID, SetD1PID) + * bits[25..22] - should always be zero, this isn't a periodic endpoint + * bits[10..0] - MPS setting differenct for EP0 +*/ 
+#define S3C_D0EPCTL_MPS_MASK (0x3 << 0) +#define S3C_D0EPCTL_MPS_SHIFT (0) +#define S3C_D0EPCTL_MPS_64 (0x0 << 0) +#define S3C_D0EPCTL_MPS_32 (0x1 << 0) +#define S3C_D0EPCTL_MPS_16 (0x2 << 0) +#define S3C_D0EPCTL_MPS_8 (0x3 << 0) + +#define S3C_DxEPCTL_EPEna (1 << 31) +#define S3C_DxEPCTL_EPDis (1 << 30) +#define S3C_DxEPCTL_SetD1PID (1 << 29) +#define S3C_DxEPCTL_SetOddFr (1 << 29) +#define S3C_DxEPCTL_SetD0PID (1 << 28) +#define S3C_DxEPCTL_SetEvenFr (1 << 28) +#define S3C_DxEPCTL_SNAK (1 << 27) +#define S3C_DxEPCTL_CNAK (1 << 26) +#define S3C_DxEPCTL_TxFNum_MASK (0xf << 22) +#define S3C_DxEPCTL_TxFNum_SHIFT (22) +#define S3C_DxEPCTL_TxFNum_LIMIT (0xf) +#define S3C_DxEPCTL_TxFNum(_x) ((_x) << 22) + +#define S3C_DxEPCTL_Stall (1 << 21) +#define S3C_DxEPCTL_Snp (1 << 20) +#define S3C_DxEPCTL_EPType_MASK (0x3 << 18) +#define S3C_DxEPCTL_EPType_SHIFT (18) +#define S3C_DxEPCTL_EPType_Control (0x0 << 18) +#define S3C_DxEPCTL_EPType_Iso (0x1 << 18) +#define S3C_DxEPCTL_EPType_Bulk (0x2 << 18) +#define S3C_DxEPCTL_EPType_Intterupt (0x3 << 18) + +#define S3C_DxEPCTL_NAKsts (1 << 17) +#define S3C_DxEPCTL_DPID (1 << 16) +#define S3C_DxEPCTL_EOFrNum (1 << 16) +#define S3C_DxEPCTL_USBActEp (1 << 15) +#define S3C_DxEPCTL_NextEp_MASK (0xf << 11) +#define S3C_DxEPCTL_NextEp_SHIFT (11) +#define S3C_DxEPCTL_NextEp_LIMIT (0xf) +#define S3C_DxEPCTL_NextEp(_x) ((_x) << 11) + +#define S3C_DxEPCTL_MPS_MASK (0x7ff << 0) +#define S3C_DxEPCTL_MPS_SHIFT (0) +#define S3C_DxEPCTL_MPS_LIMIT (0x7ff) +#define S3C_DxEPCTL_MPS(_x) ((_x) << 0) + +#define S3C_DIEPINT(_a) S3C_HSOTG_REG(0x908 + ((_a) * 0x20)) +#define S3C_DOEPINT(_a) S3C_HSOTG_REG(0xB08 + ((_a) * 0x20)) + +#define S3C_DxEPINT_INEPNakEff (1 << 6) +#define S3C_DxEPINT_Back2BackSetup (1 << 6) +#define S3C_DxEPINT_INTknEPMis (1 << 5) +#define S3C_DxEPINT_INTknTXFEmp (1 << 4) +#define S3C_DxEPINT_OUTTknEPdis (1 << 4) +#define S3C_DxEPINT_Timeout (1 << 3) +#define S3C_DxEPINT_Setup (1 << 3) +#define S3C_DxEPINT_AHBErr (1 << 2) +#define S3C_DxEPINT_EPDisbld (1 << 1) +#define S3C_DxEPINT_XferCompl (1 << 0) + +#define S3C_DIEPTSIZ0 S3C_HSOTG_REG(0x910) + +#define S3C_DIEPTSIZ0_PktCnt_MASK (0x3 << 19) +#define S3C_DIEPTSIZ0_PktCnt_SHIFT (19) +#define S3C_DIEPTSIZ0_PktCnt_LIMIT (0x3) +#define S3C_DIEPTSIZ0_PktCnt(_x) ((_x) << 19) + +#define S3C_DIEPTSIZ0_XferSize_MASK (0x7f << 0) +#define S3C_DIEPTSIZ0_XferSize_SHIFT (0) +#define S3C_DIEPTSIZ0_XferSize_LIMIT (0x7f) +#define S3C_DIEPTSIZ0_XferSize(_x) ((_x) << 0) + + +#define DOEPTSIZ0 S3C_HSOTG_REG(0xB10) +#define S3C_DOEPTSIZ0_SUPCnt_MASK (0x3 << 29) +#define S3C_DOEPTSIZ0_SUPCnt_SHIFT (29) +#define S3C_DOEPTSIZ0_SUPCnt_LIMIT (0x3) +#define S3C_DOEPTSIZ0_SUPCnt(_x) ((_x) << 29) + +#define S3C_DOEPTSIZ0_PktCnt (1 << 19) +#define S3C_DOEPTSIZ0_XferSize_MASK (0x7f << 0) +#define S3C_DOEPTSIZ0_XferSize_SHIFT (0) + +#define S3C_DIEPTSIZ(_a) S3C_HSOTG_REG(0x910 + ((_a) * 0x20)) +#define S3C_DOEPTSIZ(_a) S3C_HSOTG_REG(0xB10 + ((_a) * 0x20)) + +#define S3C_DxEPTSIZ_MC_MASK (0x3 << 29) +#define S3C_DxEPTSIZ_MC_SHIFT (29) +#define S3C_DxEPTSIZ_MC_LIMIT (0x3) +#define S3C_DxEPTSIZ_MC(_x) ((_x) << 29) + +#define S3C_DxEPTSIZ_PktCnt_MASK (0x3ff << 19) +#define S3C_DxEPTSIZ_PktCnt_SHIFT (19) +#define S3C_DxEPTSIZ_PktCnt_GET(_v) (((_v) >> 19) & 0x3ff) +#define S3C_DxEPTSIZ_PktCnt_LIMIT (0x3ff) +#define S3C_DxEPTSIZ_PktCnt(_x) ((_x) << 19) + +#define S3C_DxEPTSIZ_XferSize_MASK (0x7ffff << 0) +#define S3C_DxEPTSIZ_XferSize_SHIFT (0) +#define S3C_DxEPTSIZ_XferSize_GET(_v) (((_v) >> 0) & 0x7ffff) +#define S3C_DxEPTSIZ_XferSize_LIMIT 
(0x7ffff) +#define S3C_DxEPTSIZ_XferSize(_x) ((_x) << 0) + + +#define S3C_DIEPDMA(_a) S3C_HSOTG_REG(0x914 + ((_a) * 0x20)) +#define S3C_DOEPDMA(_a) S3C_HSOTG_REG(0xB14 + ((_a) * 0x20)) + +#define S3C_EPFIFO(_a) S3C_HSOTG_REG(0x1000 + ((_a) * 0x1000)) + +#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_H */ diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c index 993d56ee3cf..57ec9f2dcd9 100644 --- a/arch/avr32/kernel/init_task.c +++ b/arch/avr32/kernel/init_task.c @@ -15,10 +15,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. Must be aligned on an 8192-byte boundary. */ diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index 803d7be0938..27227561bad 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c @@ -212,7 +212,7 @@ out: return err; } -static inline void restart_syscall(struct pt_regs *regs) +static inline void setup_syscall_restart(struct pt_regs *regs) { if (regs->r12 == -ERESTART_RESTARTBLOCK) regs->r8 = __NR_restart_syscall; @@ -296,7 +296,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) } /* fall through */ case -ERESTARTNOINTR: - restart_syscall(regs); + setup_syscall_restart(regs); } } diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index a60cfe75791..8ea0d942cde 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -6,59 +6,65 @@ mainmenu "Blackfin Kernel Configuration" config MMU - bool - default n + def_bool n config FPU - bool - default n + def_bool n config RWSEM_GENERIC_SPINLOCK - bool - default y + def_bool y config RWSEM_XCHGADD_ALGORITHM - bool - default n + def_bool n config BLACKFIN - bool - default y + def_bool y + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER select HAVE_IDE + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_LZMA select HAVE_OPROFILE select ARCH_WANT_OPTIONAL_GPIOLIB +config GENERIC_BUG + def_bool y + depends on BUG + config ZONE_DMA - bool - default y + def_bool y config GENERIC_FIND_NEXT_BIT - bool - default y + def_bool y config GENERIC_HWEIGHT - bool - default y + def_bool y config GENERIC_HARDIRQS - bool - default y + def_bool y config GENERIC_IRQ_PROBE - bool - default y + def_bool y config GENERIC_GPIO - bool - default y + def_bool y config FORCE_MAX_ZONEORDER int default "14" config GENERIC_CALIBRATE_DELAY - bool - default y + def_bool y + +config LOCKDEP_SUPPORT + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y source "init/Kconfig" @@ -408,12 +414,12 @@ comment "Clock/PLL Setup" config CLKIN_HZ int "Frequency of the crystal on the board in Hz" + default "10000000" if BFIN532_IP0X default "11059200" if BFIN533_STAMP + default "24576000" if PNAV10 + default "25000000" # most people use this default "27000000" if BFIN533_EZKIT - default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD) default "30000000" if BFIN561_EZKIT - default "24576000" if PNAV10 - default "10000000" if BFIN532_IP0X help The frequency of CLKIN crystal oscillator on the board in Hz. Warning: This value should match the crystal on the board. 
Otherwise, diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile index d54c8283825..6f9533c3d75 100644 --- a/arch/blackfin/Makefile +++ b/arch/blackfin/Makefile @@ -137,7 +137,7 @@ archclean: INSTALL_PATH ?= /tftpboot boot := arch/$(ARCH)/boot -BOOT_TARGETS = vmImage +BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma PHONY += $(BOOT_TARGETS) install KBUILD_IMAGE := $(boot)/vmImage @@ -150,7 +150,10 @@ install: $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install define archhelp - echo '* vmImage - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)' + echo '* vmImage - Alias to selected kernel format (vmImage.gz by default)' + echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)' + echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)' + echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)' echo ' install - Install kernel using' echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or' echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or' diff --git a/arch/blackfin/boot/.gitignore b/arch/blackfin/boot/.gitignore index 3ae03994b88..229e5080867 100644 --- a/arch/blackfin/boot/.gitignore +++ b/arch/blackfin/boot/.gitignore @@ -1 +1,2 @@ -+vmImage +vmImage* +vmlinux* diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile index e028d13481a..3ab6f23561d 100644 --- a/arch/blackfin/boot/Makefile +++ b/arch/blackfin/boot/Makefile @@ -8,24 +8,41 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh -targets := vmImage -extra-y += vmlinux.bin vmlinux.gz +targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma +extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma quiet_cmd_uimage = UIMAGE $@ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ - -C gzip -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ + -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ -d $< $@ $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) -$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/vmImage: $(obj)/vmlinux.gz - $(call if_changed,uimage) - @$(kecho) 'Kernel: $@ is ready' +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) + +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) + +$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2 + $(call if_changed,uimage,bzip2) + +$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz + $(call if_changed,uimage,gzip) + +$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma + $(call if_changed,uimage,lzma) + +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_LZMA) := lzma +$(obj)/vmImage: $(obj)/vmImage.$(suffix-y) + @ln -sf $(notdir $<) $@ install: sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)" diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 7bbf44e4ddf..b1d92f13ef9 100644 --- a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -90,7 +90,7 @@ static inline int atomic_test_mask(int mask, atomic_t *v) static inline void atomic_add(int i, atomic_t *v) { - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter += i; @@ -99,7 +99,7 @@ static inline void atomic_add(int i, atomic_t *v) static inline void atomic_sub(int i, atomic_t *v) { - long flags; + unsigned long flags; 
local_irq_save_hw(flags); v->counter -= i; @@ -110,7 +110,7 @@ static inline void atomic_sub(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v) { int __temp = 0; - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter += i; @@ -124,7 +124,7 @@ static inline int atomic_add_return(int i, atomic_t *v) static inline int atomic_sub_return(int i, atomic_t *v) { int __temp = 0; - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter -= i; @@ -136,7 +136,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) static inline void atomic_inc(volatile atomic_t *v) { - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter++; @@ -145,7 +145,7 @@ static inline void atomic_inc(volatile atomic_t *v) static inline void atomic_dec(volatile atomic_t *v) { - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter--; @@ -154,7 +154,7 @@ static inline void atomic_dec(volatile atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter &= ~mask; @@ -163,7 +163,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_set_mask(unsigned int mask, atomic_t *v) { - long flags; + unsigned long flags; local_irq_save_hw(flags); v->counter |= mask; diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h index daffc0684e7..e39277ea43e 100644 --- a/arch/blackfin/include/asm/bfin-global.h +++ b/arch/blackfin/include/asm/bfin-global.h @@ -31,7 +31,7 @@ #ifndef __ASSEMBLY__ -#include <asm-generic/sections.h> +#include <asm/sections.h> #include <asm/ptrace.h> #include <asm/user.h> #include <linux/linkage.h> @@ -99,15 +99,6 @@ extern const char bfin_board_name[]; extern unsigned long bfin_sic_iwr[]; extern unsigned vr_wakeup; extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */ -extern unsigned long _ramstart, _ramend, _rambase; -extern unsigned long memory_start, memory_end, physical_mem_end; -extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], - _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], - _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], - _ebss_l2[], _l2_lma_start[]; - -/* only used when MTD_UCLINUX */ -extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; #ifdef CONFIG_BFIN_ICACHE_LOCK extern void cache_grab_lock(int way); diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index 21b036eadab..75fee2f7d9f 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h @@ -109,7 +109,8 @@ static inline void clear_bit(int nr, volatile unsigned long *addr) static inline void change_bit(int nr, volatile unsigned long *addr) { - int mask, flags; + int mask; + unsigned long flags; unsigned long *ADDR = (unsigned long *)addr; ADDR += nr >> 5; diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h index 6d3e11b1fc5..655e49540e4 100644 --- a/arch/blackfin/include/asm/bug.h +++ b/arch/blackfin/include/asm/bug.h @@ -2,13 +2,58 @@ #define _BLACKFIN_BUG_H #ifdef CONFIG_BUG -#define HAVE_ARCH_BUG -#define BUG() do { \ - dump_bfin_trace_buffer(); \ - printk(KERN_EMERG "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ - panic("BUG!"); \ -} while (0) +#define BFIN_BUG_OPCODE 0xefcd + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define _BUG_OR_WARN(flags) \ + asm volatile( \ + "1: 
.hword %0\n" \ + " .section __bug_table,\"a\",@progbits\n" \ + "2: .long 1b\n" \ + " .long %1\n" \ + " .short %2\n" \ + " .short %3\n" \ + " .org 2b + %4\n" \ + " .previous" \ + : \ + : "i"(BFIN_BUG_OPCODE), "i"(__FILE__), \ + "i"(__LINE__), "i"(flags), \ + "i"(sizeof(struct bug_entry))) + +#else + +#define _BUG_OR_WARN(flags) \ + asm volatile( \ + "1: .hword %0\n" \ + " .section __bug_table,\"a\",@progbits\n" \ + "2: .long 1b\n" \ + " .short %1\n" \ + " .org 2b + %2\n" \ + " .previous" \ + : \ + : "i"(BFIN_BUG_OPCODE), "i"(flags), \ + "i"(sizeof(struct bug_entry))) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#define BUG() \ + do { \ + _BUG_OR_WARN(0); \ + for (;;); \ + } while (0) + +#define WARN_ON(condition) \ + ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + _BUG_OR_WARN(BUGFLAG_WARNING); \ + unlikely(__ret_warn_on); \ + }) + +#define HAVE_ARCH_BUG +#define HAVE_ARCH_WARN_ON #endif diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h index 86637814cf2..2ef669ed922 100644 --- a/arch/blackfin/include/asm/cache.h +++ b/arch/blackfin/include/asm/cache.h @@ -34,9 +34,13 @@ #define L1_CACHE_SHIFT_MAX 5 #if defined(CONFIG_SMP) && \ - !defined(CONFIG_BFIN_CACHE_COHERENT) && \ - defined(CONFIG_BFIN_DCACHE) -#define __ARCH_SYNC_CORE_DCACHE + !defined(CONFIG_BFIN_CACHE_COHERENT) +# if defined(CONFIG_BFIN_ICACHE) +# define __ARCH_SYNC_CORE_ICACHE +# endif +# if defined(CONFIG_BFIN_DCACHE) +# define __ARCH_SYNC_CORE_DCACHE +# endif #ifndef __ASSEMBLY__ asmlinkage void __raw_smp_mark_barrier_asm(void); asmlinkage void __raw_smp_check_barrier_asm(void); @@ -51,6 +55,7 @@ static inline void smp_check_barrier(void) } void resync_core_dcache(void); +void resync_core_icache(void); #endif #endif diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h index 94697f0f6f4..5c17dee53b5 100644 --- a/arch/blackfin/include/asm/cacheflush.h +++ b/arch/blackfin/include/asm/cacheflush.h @@ -37,6 +37,7 @@ extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned lo extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); extern void blackfin_dflush_page(void *page); extern void blackfin_invalidate_entire_dcache(void); +extern void blackfin_invalidate_entire_icache(void); #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) @@ -97,7 +98,7 @@ do { memcpy(dst, src, len); \ extern unsigned long reserved_mem_dcache_on; extern unsigned long reserved_mem_icache_on; -static inline int bfin_addr_dcachable(unsigned long addr) +static inline int bfin_addr_dcacheable(unsigned long addr) { #ifdef CONFIG_BFIN_DCACHE if (addr < (_ramend - DMA_UNCACHED_REGION)) diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h index c2594ef877f..565b8136855 100644 --- a/arch/blackfin/include/asm/cpu.h +++ b/arch/blackfin/include/asm/cpu.h @@ -34,6 +34,7 @@ struct blackfin_cpudata { unsigned int dmemctl; unsigned long loops_per_jiffy; unsigned long dcache_invld_count; + unsigned long icache_invld_count; }; DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h index 40a8c178f10..8643680f0f7 100644 --- a/arch/blackfin/include/asm/ftrace.h +++ b/arch/blackfin/include/asm/ftrace.h @@ -1 +1,13 @@ -/* empty */ +/* + * Blackfin ftrace code + * + * Copyright 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __ASM_BFIN_FTRACE_H__ +#define __ASM_BFIN_FTRACE_H__ + +#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call: LINK + CALL */ + +#endif diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h index 51d0bf5e289..bbe1c3726b6 100644 --- a/arch/blackfin/include/asm/ipipe.h +++ b/arch/blackfin/include/asm/ipipe.h @@ -35,10 +35,10 @@ #include <asm/atomic.h> #include <asm/traps.h> -#define IPIPE_ARCH_STRING "1.9-01" +#define IPIPE_ARCH_STRING "1.10-00" #define IPIPE_MAJOR_NUMBER 1 -#define IPIPE_MINOR_NUMBER 9 -#define IPIPE_PATCH_NUMBER 1 +#define IPIPE_MINOR_NUMBER 10 +#define IPIPE_PATCH_NUMBER 0 #ifdef CONFIG_SMP #error "I-pipe/blackfin: SMP not implemented" @@ -54,10 +54,11 @@ do { \ #define task_hijacked(p) \ ({ \ - int __x__ = ipipe_current_domain != ipipe_root_domain; \ - /* We would need to clear the SYNC flag for the root domain */ \ - /* over the current processor in SMP mode. */ \ - local_irq_enable_hw(); __x__; \ + int __x__ = __ipipe_root_domain_p; \ + __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ + if (__x__) \ + local_irq_enable_hw(); \ + !__x__; \ }) struct ipipe_domain; @@ -179,23 +180,24 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul) #define __ipipe_run_isr(ipd, irq) \ do { \ - if (ipd == ipipe_root_domain) { \ + if (!__ipipe_pipeline_head_p(ipd)) \ local_irq_enable_hw(); \ - if (ipipe_virtual_irq_p(irq)) \ + if (ipd == ipipe_root_domain) { \ + if (unlikely(ipipe_virtual_irq_p(irq))) { \ + irq_enter(); \ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ - else \ + irq_exit(); \ + } else \ ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \ - local_irq_disable_hw(); \ } else { \ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ - local_irq_enable_nohead(ipd); \ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \ /* Attempt to exit the outer interrupt level before \ * starting the deferred IRQ processing. 
*/ \ - local_irq_disable_nohead(ipd); \ __ipipe_run_irqtail(); \ __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ } \ + local_irq_disable_hw(); \ } while (0) #define __ipipe_syscall_watched_p(p, sc) \ diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h index 7645e85a5f6..400bdd52ce8 100644 --- a/arch/blackfin/include/asm/irq.h +++ b/arch/blackfin/include/asm/irq.h @@ -17,270 +17,17 @@ #ifndef _BFIN_IRQ_H_ #define _BFIN_IRQ_H_ -/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/ -#include <mach/irq.h> -#include <asm/pda.h> -#include <asm/processor.h> - -#ifdef CONFIG_SMP -/* Forward decl needed due to cdef inter dependencies */ -static inline uint32_t __pure bfin_dspid(void); -# define blackfin_core_id() (bfin_dspid() & 0xff) -# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask -#else -extern unsigned long bfin_irq_flags; -#endif - -#ifdef CONFIG_IPIPE - -#include <linux/ipipe_trace.h> +#include <linux/irqflags.h> -void __ipipe_unstall_root(void); - -void __ipipe_restore_root(unsigned long flags); - -#ifdef CONFIG_DEBUG_HWERR -# define __all_masked_irq_flags 0x3f -# define __save_and_cli_hw(x) \ - __asm__ __volatile__( \ - "cli %0;" \ - "sti %1;" \ - : "=&d"(x) \ - : "d" (0x3F) \ - ) -#else -# define __all_masked_irq_flags 0x1f -# define __save_and_cli_hw(x) \ - __asm__ __volatile__( \ - "cli %0;" \ - : "=&d"(x) \ - ) -#endif - -#define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags) -#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags)) -#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x) - -#define local_save_flags(x) \ - do { \ - (x) = __ipipe_test_root() ? \ - __all_masked_irq_flags : bfin_irq_flags; \ - barrier(); \ - } while (0) - -#define local_irq_save(x) \ - do { \ - (x) = __ipipe_test_and_stall_root() ? \ - __all_masked_irq_flags : bfin_irq_flags; \ - barrier(); \ - } while (0) - -static inline void local_irq_restore(unsigned long x) -{ - barrier(); - __ipipe_restore_root(x == __all_masked_irq_flags); -} - -#define local_irq_disable() \ - do { \ - __ipipe_stall_root(); \ - barrier(); \ - } while (0) - -static inline void local_irq_enable(void) -{ - barrier(); - __ipipe_unstall_root(); -} - -#define irqs_disabled() __ipipe_test_root() - -#define local_save_flags_hw(x) \ - __asm__ __volatile__( \ - "cli %0;" \ - "sti %0;" \ - : "=d"(x) \ - ) - -#define irqs_disabled_hw() \ - ({ \ - unsigned long flags; \ - local_save_flags_hw(flags); \ - !irqs_enabled_from_flags_hw(flags); \ - }) - -static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real) -{ - /* Merge virtual and real interrupt mask bits into a single - 32bit word. 
*/ - return (real & ~(1 << 31)) | ((virt != 0) << 31); -} - -static inline int raw_demangle_irq_bits(unsigned long *x) -{ - int virt = (*x & (1 << 31)) != 0; - *x &= ~(1L << 31); - return virt; -} - -#ifdef CONFIG_IPIPE_TRACE_IRQSOFF - -#define local_irq_disable_hw() \ - do { \ - int _tmp_dummy; \ - if (!irqs_disabled_hw()) \ - ipipe_trace_begin(0x80000000); \ - __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \ - } while (0) - -#define local_irq_enable_hw() \ - do { \ - if (irqs_disabled_hw()) \ - ipipe_trace_end(0x80000000); \ - __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \ - } while (0) - -#define local_irq_save_hw(x) \ - do { \ - __save_and_cli_hw(x); \ - if (local_test_iflag_hw(x)) \ - ipipe_trace_begin(0x80000001); \ - } while (0) - -#define local_irq_restore_hw(x) \ - do { \ - if (local_test_iflag_hw(x)) { \ - ipipe_trace_end(0x80000001); \ - local_irq_enable_hw_notrace(); \ - } \ - } while (0) - -#define local_irq_disable_hw_notrace() \ - do { \ - int _tmp_dummy; \ - __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \ - } while (0) - -#define local_irq_enable_hw_notrace() \ - __asm__ __volatile__( \ - "sti %0;" \ - : \ - : "d"(bfin_irq_flags) \ - ) - -#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x) - -#define local_irq_restore_hw_notrace(x) \ - do { \ - if (local_test_iflag_hw(x)) \ - local_irq_enable_hw_notrace(); \ - } while (0) - -#else /* CONFIG_IPIPE_TRACE_IRQSOFF */ - -#define local_irq_enable_hw() \ - __asm__ __volatile__( \ - "sti %0;" \ - : \ - : "d"(bfin_irq_flags) \ - ) - -#define local_irq_disable_hw() \ - do { \ - int _tmp_dummy; \ - __asm__ __volatile__ ( \ - "cli %0;" \ - : "=d" (_tmp_dummy)); \ - } while (0) - -#define local_irq_restore_hw(x) \ - do { \ - if (irqs_enabled_from_flags_hw(x)) \ - local_irq_enable_hw(); \ - } while (0) - -#define local_irq_save_hw(x) __save_and_cli_hw(x) - -#define local_irq_disable_hw_notrace() local_irq_disable_hw() -#define local_irq_enable_hw_notrace() local_irq_enable_hw() -#define local_irq_save_hw_notrace(x) local_irq_save_hw(x) -#define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x) - -#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ - -#else /* !CONFIG_IPIPE */ - -/* - * Interrupt configuring macros. 
- */ -#define local_irq_disable() \ - do { \ - int __tmp_dummy; \ - __asm__ __volatile__( \ - "cli %0;" \ - : "=d" (__tmp_dummy) \ - ); \ - } while (0) - -#define local_irq_enable() \ - __asm__ __volatile__( \ - "sti %0;" \ - : \ - : "d" (bfin_irq_flags) \ - ) - -#ifdef CONFIG_DEBUG_HWERR -# define __save_and_cli(x) \ - __asm__ __volatile__( \ - "cli %0;" \ - "sti %1;" \ - : "=&d" (x) \ - : "d" (0x3F) \ - ) -#else -# define __save_and_cli(x) \ - __asm__ __volatile__( \ - "cli %0;" \ - : "=&d" (x) \ - ) -#endif - -#define local_save_flags(x) \ - __asm__ __volatile__( \ - "cli %0;" \ - "sti %0;" \ - : "=d" (x) \ - ) - -#ifdef CONFIG_DEBUG_HWERR -#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0) -#else -#define irqs_enabled_from_flags(x) ((x) != 0x1f) -#endif - -#define local_irq_restore(x) \ - do { \ - if (irqs_enabled_from_flags(x)) \ - local_irq_enable(); \ - } while (0) - -/* For spinlocks etc */ -#define local_irq_save(x) __save_and_cli(x) - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - !irqs_enabled_from_flags(flags); \ -}) - -#define local_irq_save_hw(x) local_irq_save(x) -#define local_irq_restore_hw(x) local_irq_restore(x) -#define local_irq_enable_hw() local_irq_enable() -#define local_irq_disable_hw() local_irq_disable() -#define irqs_disabled_hw() irqs_disabled() +/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */ +#include <mach/irq.h> -#endif /* !CONFIG_IPIPE */ +/* Xenomai IPIPE helpers */ +#define local_irq_restore_hw(x) local_irq_restore(x) +#define local_irq_save_hw(x) local_irq_save(x) +#define local_irq_enable_hw(x) local_irq_enable(x) +#define local_irq_disable_hw(x) local_irq_disable(x) +#define irqs_disabled_hw(x) irqs_disabled(x) #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE) # define NOP_PAD_ANOMALY_05000244 "nop; nop;" diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h new file mode 100644 index 00000000000..139cba4651b --- /dev/null +++ b/arch/blackfin/include/asm/irqflags.h @@ -0,0 +1,63 @@ +/* + * interface to Blackfin CEC + * + * Copyright 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __ASM_BFIN_IRQFLAGS_H__ +#define __ASM_BFIN_IRQFLAGS_H__ + +#ifdef CONFIG_SMP +# include <asm/pda.h> +# include <asm/processor.h> +/* Forward decl needed due to cdef inter dependencies */ +static inline uint32_t __pure bfin_dspid(void); +# define blackfin_core_id() (bfin_dspid() & 0xff) +# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask +#else +extern unsigned long bfin_irq_flags; +#endif + +static inline void bfin_sti(unsigned long flags) +{ + asm volatile("sti %0;" : : "d" (flags)); +} + +static inline unsigned long bfin_cli(void) +{ + unsigned long flags; + asm volatile("cli %0;" : "=d" (flags)); + return flags; +} + +static inline void raw_local_irq_disable(void) +{ + bfin_cli(); +} +static inline void raw_local_irq_enable(void) +{ + bfin_sti(bfin_irq_flags); +} + +#define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0) + +#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0) + +static inline void raw_local_irq_restore(unsigned long flags) +{ + if (!raw_irqs_disabled_flags(flags)) + raw_local_irq_enable(); +} + +static inline unsigned long __raw_local_irq_save(void) +{ + unsigned long flags = bfin_cli(); +#ifdef CONFIG_DEBUG_HWERR + bfin_sti(0x3f); +#endif + return flags; +} +#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0) + +#endif diff --git a/arch/blackfin/include/asm/kmap_types.h b/arch/blackfin/include/asm/kmap_types.h index e215f710497..0a88622339e 100644 --- a/arch/blackfin/include/asm/kmap_types.h +++ b/arch/blackfin/include/asm/kmap_types.h @@ -1,21 +1,6 @@ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h deleted file mode 100644 index 0134151656a..00000000000 --- a/arch/blackfin/include/asm/mutex-dec.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * include/asm-generic/mutex-dec.h - * - * Generic implementation of the mutex fastpath, based on atomic - * decrement/increment. - */ -#ifndef _ASM_GENERIC_MUTEX_DEC_H -#define _ASM_GENERIC_MUTEX_DEC_H - -/** - * __mutex_fastpath_lock - try to take the lock by moving the count - * from 1 to a 0 value - * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 1 - * - * Change the count from 1 to a value lower than 1, and call <fail_fn> if - * it wasn't 1 originally. This function MUST leave the value lower than - * 1 even when the "1" assertion wasn't true. - */ -static inline void -__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *)) -{ - if (unlikely(atomic_dec_return(count) < 0)) - fail_fn(count); - else - smp_mb(); -} - -/** - * __mutex_fastpath_lock_retval - try to take the lock by moving the count - * from 1 to a 0 value - * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 1 - * - * Change the count from 1 to a value lower than 1, and call <fail_fn> if - * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, - * or anything the slow path function returns. 
- */ -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *)) -{ - if (unlikely(atomic_dec_return(count) < 0)) - return fail_fn(count); - else { - smp_mb(); - return 0; - } -} - -/** - * __mutex_fastpath_unlock - try to promote the count from 0 to 1 - * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 0 - * - * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. - * In the failure case, this function is allowed to either set the value to - * 1, or to set it to a value lower than 1. - * - * If the implementation sets it to a value of lower than 1, then the - * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs - * to return 0 otherwise. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *)) -{ - smp_mb(); - if (unlikely(atomic_inc_return(count) <= 0)) - fail_fn(count); -} - -#define __mutex_slowpath_needs_to_unlock() 1 - -/** - * __mutex_fastpath_trylock - try to acquire the mutex, without waiting - * - * @count: pointer of type atomic_t - * @fail_fn: fallback function - * - * Change the count from 1 to a value lower than 1, and return 0 (failure) - * if it wasn't 1 originally, or return 1 (success) otherwise. This function - * MUST leave the value lower than 1 even when the "1" assertion wasn't true. - * Additionally, if the value was < 0 originally, this function must not leave - * it to 0 on failure. - * - * If the architecture has no effective trylock variant, it should call the - * <fail_fn> spinlock-based trylock variant unconditionally. - */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - /* - * We have two variants here. The cmpxchg based one is the best one - * because it never induce a false contention state. It is included - * here because architectures using the inc/dec algorithms over the - * xchg ones are much more likely to support cmpxchg natively. - * - * If not we fall back to the spinlock based variant - that is - * just as efficient (and simpler) as a 'destructive' probing of - * the mutex state would be. 
- */ -#ifdef __HAVE_ARCH_CMPXCHG - if (likely(atomic_cmpxchg(count, 1, 0) == 1)) { - smp_mb(); - return 1; - } - return 0; -#else - return fail_fn(count); -#endif -} - -#endif diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h index 1443c3353a8..e7fd0ecd73f 100644 --- a/arch/blackfin/include/asm/sections.h +++ b/arch/blackfin/include/asm/sections.h @@ -4,4 +4,15 @@ /* nothing to see, move along */ #include <asm-generic/sections.h> +/* only used when MTD_UCLINUX */ +extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; + +extern unsigned long _ramstart, _ramend, _rambase; +extern unsigned long memory_start, memory_end, physical_mem_end; + +extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], + _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], + _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], + _ebss_l2[], _l2_lma_start[]; + #endif diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h index a4c8254bec5..294dbda2416 100644 --- a/arch/blackfin/include/asm/system.h +++ b/arch/blackfin/include/asm/system.h @@ -35,10 +35,10 @@ #define _BLACKFIN_SYSTEM_H #include <linux/linkage.h> -#include <linux/compiler.h> +#include <linux/irqflags.h> #include <mach/anomaly.h> +#include <asm/cache.h> #include <asm/pda.h> -#include <asm/processor.h> #include <asm/irq.h> /* diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h index cf5066d3efd..da35133c171 100644 --- a/arch/blackfin/include/asm/unistd.h +++ b/arch/blackfin/include/asm/unistd.h @@ -380,8 +380,9 @@ #define __NR_inotify_init1 365 #define __NR_preadv 366 #define __NR_pwritev 367 +#define __NR_rt_tgsigqueueinfo 368 -#define __NR_syscall 368 +#define __NR_syscall 369 #define NR_syscalls __NR_syscall /* Old optional stuff no one actually uses */ diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index fd4d4328a0f..3731088e181 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile @@ -15,6 +15,10 @@ else obj-y += time.o endif +obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +CFLAGS_REMOVE_ftrace.o = -pg + obj-$(CONFIG_IPIPE) += ipipe.o obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o @@ -23,6 +27,7 @@ obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o # the kgdb test puts code into L2 and without linker # relaxation, we need to force long calls to/from it diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c index 763ed84ba45..e0bf8cc0690 100644 --- a/arch/blackfin/kernel/bfin_dma_5xx.c +++ b/arch/blackfin/kernel/bfin_dma_5xx.c @@ -453,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size) unsigned long src = (unsigned long)psrc; size_t bulk, rest; - if (bfin_addr_dcachable(src)) + if (bfin_addr_dcacheable(src)) blackfin_dcache_flush_range(src, src + size); - if (bfin_addr_dcachable(dst)) + if (bfin_addr_dcacheable(dst)) blackfin_dcache_invalidate_range(dst, dst + size); bulk = size & ~0xffff; diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c index 53e893ff708..aa05e638fb7 100644 --- a/arch/blackfin/kernel/bfin_ksyms.c +++ b/arch/blackfin/kernel/bfin_ksyms.c @@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm); 
EXPORT_SYMBOL(__raw_smp_check_barrier_asm); #endif #endif + +#ifdef CONFIG_FUNCTION_TRACER +extern void _mcount(void); +EXPORT_SYMBOL(_mcount); +#endif diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 87463ce87f5..784923e52a9 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c @@ -151,7 +151,7 @@ static noinline int dcplb_miss(unsigned int cpu) d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; #ifdef CONFIG_BFIN_DCACHE - if (bfin_addr_dcachable(addr)) { + if (bfin_addr_dcacheable(addr)) { d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; #ifdef CONFIG_BFIN_WT d_data |= CPLB_L1_AOW | CPLB_WT; diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c index 8cbb47c7b66..12b030842fd 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c @@ -28,6 +28,7 @@ #include <asm/cplbinit.h> #include <asm/cplb.h> #include <asm/mmu_context.h> +#include <asm/traps.h> /* * WARNING @@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data, #endif } -/* - * Given the contents of the status register, return the index of the - * CPLB that caused the fault. - */ -static inline int faulting_cplb_index(int status) -{ - int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF); - return 30 - signbits; -} - -/* - * Given the contents of the status register and the DCPLB_DATA contents, - * return true if a write access should be permitted. - */ -static inline int write_permitted(int status, unsigned long data) -{ - if (status & FAULT_USERSUPV) - return !!(data & CPLB_SUPV_WR); - else - return !!(data & CPLB_USER_WR); -} - /* Counters to implement round-robin replacement. */ static int icplb_rr_index[NR_CPUS] PDT_ATTR; static int dcplb_rr_index[NR_CPUS] PDT_ATTR; @@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu) return CPLB_RELOADED; } -MGR_ATTR static noinline int dcplb_protection_fault(int cpu) -{ - int status = bfin_read_DCPLB_STATUS(); - - nr_dcplb_prot[cpu]++; - - if (likely(status & FAULT_RW)) { - int idx = faulting_cplb_index(status); - unsigned long regaddr = DCPLB_DATA0 + idx * 4; - unsigned long data = bfin_read32(regaddr); - - /* Check if fault is to dirty a clean page */ - if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) && - write_permitted(status, data)) { - - dcplb_tbl[cpu][idx].data = data; - bfin_write32(regaddr, data); - return CPLB_RELOADED; - } - } - - return CPLB_PROT_VIOL; -} - MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) { int cause = seqstat & 0x3f; unsigned int cpu = smp_processor_id(); switch (cause) { - case 0x2C: + case VEC_CPLB_I_M: return icplb_miss(cpu); - case 0x26: + case VEC_CPLB_M: return dcplb_miss(cpu); default: - if (unlikely(cause == 0x23)) - return dcplb_protection_fault(cpu); - return CPLB_UNKNOWN_ERR; } } diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c index 3302719173c..2ab56811841 100644 --- a/arch/blackfin/kernel/early_printk.c +++ b/arch/blackfin/kernel/early_printk.c @@ -202,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void) asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr) { /* This can happen before the uart is initialized, so initialize - * the UART now + * the UART now (but only if we are running on the processor we think + * we are compiled for - otherwise we write to MMRs that don't exist, + * and cause other problems. 
Nothing comes out the UART, but it does + * end up in the __buf_log. */ - if (likely(early_console == NULL)) + if (likely(early_console == NULL) && CPUID == bfin_cpuid()) setup_early_printk(DEFAULT_EARLY_PORT); + printk(KERN_EMERG "Early panic\n"); dump_bfin_mem(fp); show_regs(fp); dump_bfin_trace_buffer(); diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S new file mode 100644 index 00000000000..6980b7a0615 --- /dev/null +++ b/arch/blackfin/kernel/ftrace-entry.S @@ -0,0 +1,140 @@ +/* + * mcount and friends -- ftrace stuff + * + * Copyright (C) 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include <linux/linkage.h> +#include <asm/ftrace.h> + +.text + +/* GCC will have called us before setting up the function prologue, so we + * can clobber the normal scratch registers, but we need to make sure to + * save/restore the registers used for argument passing (R0-R2) in case + * the profiled function is using them. With data registers, R3 is the + * only one we can blow away. With pointer registers, we have P0-P2. + * + * Upon entry, the RETS will point to the top of the current profiled + * function. And since GCC setup the frame for us, the previous function + * will be waiting there. mmmm pie. + */ +ENTRY(__mcount) + /* save third function arg early so we can do testing below */ + [--sp] = r2; + + /* load the function pointer to the tracer */ + p0.l = _ftrace_trace_function; + p0.h = _ftrace_trace_function; + r3 = [p0]; + + /* optional micro optimization: don't call the stub tracer */ + r2.l = _ftrace_stub; + r2.h = _ftrace_stub; + cc = r2 == r3; + if ! cc jump .Ldo_trace; + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* if the ftrace_graph_return function pointer is not set to + * the ftrace_stub entry, call prepare_ftrace_return(). + */ + p0.l = _ftrace_graph_return; + p0.h = _ftrace_graph_return; + r3 = [p0]; + cc = r2 == r3; + if ! cc jump _ftrace_graph_caller; + + /* similarly, if the ftrace_graph_entry function pointer is not + * set to the ftrace_graph_entry_stub entry, ... + */ + p0.l = _ftrace_graph_entry; + p0.h = _ftrace_graph_entry; + r2.l = _ftrace_graph_entry_stub; + r2.h = _ftrace_graph_entry_stub; + r3 = [p0]; + cc = r2 == r3; + if ! cc jump _ftrace_graph_caller; +#endif + + r2 = [sp++]; + rts; + +.Ldo_trace: + + /* save first/second function arg and the return register */ + [--sp] = r0; + [--sp] = r1; + [--sp] = rets; + + /* setup the tracer function */ + p0 = r3; + + /* tracer(ulong frompc, ulong selfpc): + * frompc: the pc that did the call to ... + * selfpc: ... this location + * the selfpc itself will need adjusting for the mcount call + */ + r1 = rets; + r0 = [fp + 4]; + r1 += -MCOUNT_INSN_SIZE; + + /* call the tracer */ + call (p0); + + /* restore state and get out of dodge */ +.Lfinish_trace: + rets = [sp++]; + r1 = [sp++]; + r0 = [sp++]; + r2 = [sp++]; + +.globl _ftrace_stub +_ftrace_stub: + rts; +ENDPROC(__mcount) + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* The prepare_ftrace_return() function is similar to the trace function + * except it takes a pointer to the location of the frompc. This is so + * the prepare_ftrace_return() can hijack it temporarily for probing + * purposes. 
+ */ +ENTRY(_ftrace_graph_caller) + /* save first/second function arg and the return register */ + [--sp] = r0; + [--sp] = r1; + [--sp] = rets; + + r0 = fp; + r1 = rets; + r0 += 4; + r1 += -MCOUNT_INSN_SIZE; + call _prepare_ftrace_return; + + jump .Lfinish_trace; +ENDPROC(_ftrace_graph_caller) + +/* Undo the rewrite caused by ftrace_graph_caller(). The common function + * ftrace_return_to_handler() will return the original rets so we can + * restore it and be on our way. + */ +ENTRY(_return_to_handler) + /* make sure original return values are saved */ + [--sp] = p0; + [--sp] = r0; + [--sp] = r1; + + /* get original return address */ + call _ftrace_return_to_handler; + rets = r0; + + /* anomaly 05000371 - make sure we have at least three instructions + * between rets setting and the return + */ + r1 = [sp++]; + r0 = [sp++]; + p0 = [sp++]; + rts; +ENDPROC(_return_to_handler) +#endif diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c new file mode 100644 index 00000000000..905bfc40a00 --- /dev/null +++ b/arch/blackfin/kernel/ftrace.c @@ -0,0 +1,42 @@ +/* + * ftrace graph code + * + * Copyright (C) 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include <linux/ftrace.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <asm/atomic.h> + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + +/* + * Hook the return address and push it in the stack of return addrs + * in current thread info. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long)&return_to_handler; + + if (unlikely(atomic_read(&current->tracing_graph_pause))) + return; + + if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY) + return; + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + return; + } + + /* all is well in the world ! hijack RETS ... */ + *parent = return_hooker; +} + +#endif diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 2c228c02097..c26c34de9f3 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c @@ -35,10 +35,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); - -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. * diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c index 5fc424803a1..d8cde1fc5cb 100644 --- a/arch/blackfin/kernel/ipipe.c +++ b/arch/blackfin/kernel/ipipe.c @@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs) * interrupt.
*/ m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR); - this_domain = ipipe_current_domain; + this_domain = __ipipe_current_domain; if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))) head = &this_domain->p_link; @@ -212,7 +212,9 @@ void __ipipe_unstall_root_raw(void) int __ipipe_syscall_root(struct pt_regs *regs) { + struct ipipe_percpu_domain_data *p; unsigned long flags; + int ret; /* * We need to run the IRQ tail hook whenever we don't @@ -231,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs) /* * This routine either returns: * 0 -- if the syscall is to be passed to Linux; - * 1 -- if the syscall should not be passed to Linux, and no + * >0 -- if the syscall should not be passed to Linux, and no * tail work should be performed; - * -1 -- if the syscall should not be passed to Linux but the + * <0 -- if the syscall should not be passed to Linux but the * tail work has to be performed (for handling signals etc). */ - if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) && - __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) { - if (ipipe_root_domain_p && !in_atomic()) { - /* - * Sync pending VIRQs before _TIF_NEED_RESCHED - * is tested. - */ - local_irq_save_hw(flags); - if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0) - __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); - local_irq_restore_hw(flags); - return -1; - } + if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL)) + return 0; + + ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); + + local_irq_save_hw(flags); + + if (!__ipipe_root_domain_p) { + local_irq_restore_hw(flags); return 1; } - return 0; + p = ipipe_root_cpudom_ptr(); + if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0) + __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); + + local_irq_restore_hw(flags); + + return -ret; } unsigned long ipipe_critical_enter(void (*syncfn) (void)) @@ -329,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void) void ___ipipe_sync_pipeline(unsigned long syncmask) { - struct ipipe_domain *ipd = ipipe_current_domain; - - if (ipd == ipipe_root_domain) { + if (__ipipe_root_domain_p) { if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status))) return; } diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 80447f99c2b..6454babdfaf 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -1098,7 +1098,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) CPUID, bfin_cpuid()); seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" - "stepping\t: %d\n", + "stepping\t: %d ", cpu, cclk/1000000, sclk/1000000, #ifdef CONFIG_MPU "mpu on", @@ -1107,7 +1107,16 @@ static int show_cpuinfo(struct seq_file *m, void *v) #endif revid); - seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", + if (bfin_revid() != bfin_compiled_revid()) { + if (bfin_compiled_revid() == -1) + seq_printf(m, "(Compiled for Rev none)"); + else if (bfin_compiled_revid() == 0xffff) + seq_printf(m, "(Compiled for Rev any)"); + else + seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); + } + + seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", cclk/1000000, cclk%1000000, sclk/1000000, sclk%1000000); seq_printf(m, "bogomips\t: %lu.%02lu\n" @@ -1172,6 +1181,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) #ifdef __ARCH_SYNC_CORE_DCACHE seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); #endif +#ifdef __ARCH_SYNC_CORE_ICACHE + seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); 
+#endif #ifdef CONFIG_BFIN_ICACHE_LOCK switch ((cpudata->imemctl >> 3) & WAYALL_L) { case WAY0_L: diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c new file mode 100644 index 00000000000..30301e1eace --- /dev/null +++ b/arch/blackfin/kernel/stacktrace.c @@ -0,0 +1,53 @@ +/* + * Blackfin stacktrace code (mostly copied from avr32) + * + * Copyright 2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. + */ + +#include <linux/sched.h> +#include <linux/stacktrace.h> +#include <linux/thread_info.h> +#include <linux/module.h> + +register unsigned long current_frame_pointer asm("FP"); + +struct stackframe { + unsigned long fp; + unsigned long rets; +}; + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace(struct stack_trace *trace) +{ + unsigned long low, high; + unsigned long fp; + struct stackframe *frame; + int skip = trace->skip; + + low = (unsigned long)task_stack_page(current); + high = low + THREAD_SIZE; + fp = current_frame_pointer; + + while (fp >= low && fp <= (high - sizeof(*frame))) { + frame = (struct stackframe *)fp; + + if (skip) { + skip--; + } else { + trace->entries[trace->nr_entries++] = frame->rets; + if (trace->nr_entries >= trace->max_entries) + break; + } + + /* + * The next frame must be at a higher address than the + * current frame. + */ + low = fp + sizeof(*frame); + fp = frame->fp; + } +} +EXPORT_SYMBOL_GPL(save_stack_trace); diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index aa76dfb0226..d279552fe9b 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -27,6 +27,7 @@ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include <linux/bug.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -238,6 +239,11 @@ asmlinkage void double_fault_c(struct pt_regs *fp) } +static int kernel_mode_regs(struct pt_regs *regs) +{ + return regs->ipend & 0xffc0; +} + asmlinkage void trap_c(struct pt_regs *fp) { #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON @@ -246,6 +252,7 @@ asmlinkage void trap_c(struct pt_regs *fp) #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO unsigned int cpu = smp_processor_id(); #endif + const char *strerror = NULL; int sig = 0; siginfo_t info; unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; @@ -259,27 +266,10 @@ asmlinkage void trap_c(struct pt_regs *fp) * double faults if the stack has become corrupt */ - /* If the fault was caused by a kernel thread, or interrupt handler - * we will kernel panic, so the system reboots. - * If KGDB is enabled, don't set this for kernel breakpoints - */ - - /* TODO: check to see if we are in some sort of deferred HWERR - * that we should be able to recover from, not kernel panic - */ - if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP) -#ifdef CONFIG_KGDB - && (trapnr != VEC_EXCPT02) +#ifndef CONFIG_KGDB + /* IPEND is skipped if KGDB isn't enabled (see entry code) */ + fp->ipend = bfin_read_IPEND(); #endif - ){ - console_verbose(); - oops_in_progress = 1; - } else if (current) { - if (current->mm == NULL) { - console_verbose(); - oops_in_progress = 1; - } - } /* trap_c() will be called for exceptions. During exceptions * processing, the pc value should be set with retx value. 
@@ -307,15 +297,15 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a breakpoint in kernel space */ - if (fp->ipend & 0xffc0) - return; + if (kernel_mode_regs(fp)) + goto traps_done; else break; /* 0x03 - User Defined, userspace stack overflow */ case VEC_EXCPT03: info.si_code = SEGV_STACKFLOW; sig = SIGSEGV; - verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x02 - KGDB initial connection and break signal trap */ @@ -324,7 +314,7 @@ asmlinkage void trap_c(struct pt_regs *fp) info.si_code = TRAP_ILLTRAP; sig = SIGTRAP; CHK_DEBUGGER_TRAP(); - return; + goto traps_done; #endif /* 0x04 - User Defined */ /* 0x05 - User Defined */ @@ -344,7 +334,7 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_EXCPT04 ... VEC_EXCPT15: info.si_code = ILL_ILLPARAOP; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x10 HW Single step, handled here */ @@ -353,15 +343,15 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGTRAP; CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a single step in kernel space */ - if (fp->ipend & 0xffc0) - return; + if (kernel_mode_regs(fp)) + goto traps_done; else break; /* 0x11 - Trace Buffer Full, handled here */ case VEC_OVFLOW: info.si_code = TRAP_TRACEFLOW; sig = SIGTRAP; - verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x12 - Reserved, Caught by default */ @@ -381,37 +371,54 @@ asmlinkage void trap_c(struct pt_regs *fp) /* 0x20 - Reserved, Caught by default */ /* 0x21 - Undefined Instruction, handled here */ case VEC_UNDEF_I: +#ifdef CONFIG_BUG + if (kernel_mode_regs(fp)) { + switch (report_bug(fp->pc, fp)) { + case BUG_TRAP_TYPE_NONE: + break; + case BUG_TRAP_TYPE_WARN: + dump_bfin_trace_buffer(); + fp->pc += 2; + goto traps_done; + case BUG_TRAP_TYPE_BUG: + /* call to panic() will dump trace, and it is + * off at this point, so it won't be clobbered + */ + panic("BUG()"); + } + } +#endif info.si_code = ILL_ILLOPC; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x22 - Illegal Instruction Combination, handled here */ case VEC_ILGAL_I: info.si_code = ILL_ILLPARAOP; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x23 - Data CPLB protection violation, handled here */ case VEC_CPLB_VL: info.si_code = ILL_CPLB_VI; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x24 - Data access misaligned, handled here */ case VEC_MISALI_D: info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x25 - Unrecoverable Event, handled here */ case VEC_UNCOV: info.si_code = ILL_ILLEXCPT; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, @@ -419,7 +426,7 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_CPLB_M: info.si_code = BUS_ADRALN; sig = SIGBUS; - 
verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE); break; /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */ case VEC_CPLB_MHIT: @@ -427,10 +434,10 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START) - verbose_printk(KERN_NOTICE "NULL pointer access\n"); + strerror = KERN_NOTICE "NULL pointer access\n"; else #endif - verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x28 - Emulation Watchpoint, handled here */ @@ -440,8 +447,8 @@ asmlinkage void trap_c(struct pt_regs *fp) pr_debug(EXC_0x28(KERN_DEBUG)); CHK_DEBUGGER_TRAP_MAYBE(); /* Check if this is a watchpoint in kernel space */ - if (fp->ipend & 0xffc0) - return; + if (kernel_mode_regs(fp)) + goto traps_done; else break; #ifdef CONFIG_BF535 @@ -449,7 +456,7 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */ info.si_code = BUS_OPFETCH; sig = SIGBUS; - verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n"); + strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n"; CHK_DEBUGGER_TRAP_MAYBE(); break; #else @@ -459,21 +466,21 @@ asmlinkage void trap_c(struct pt_regs *fp) case VEC_MISALI_I: info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2B - Instruction CPLB protection violation, handled here */ case VEC_CPLB_I_VL: info.si_code = ILL_CPLB_VI; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */ case VEC_CPLB_I_M: info.si_code = ILL_CPLB_MISS; sig = SIGBUS; - verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE); break; /* 0x2D - Instruction CPLB Multiple Hits, handled here */ case VEC_CPLB_I_MHIT: @@ -481,17 +488,17 @@ asmlinkage void trap_c(struct pt_regs *fp) sig = SIGSEGV; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START) - verbose_printk(KERN_NOTICE "Jump to NULL address\n"); + strerror = KERN_NOTICE "Jump to NULL address\n"; else #endif - verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2E - Illegal use of Supervisor Resource, handled here */ case VEC_ILL_RES: info.si_code = ILL_PRVOPC; sig = SIGILL; - verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE)); + strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE); CHK_DEBUGGER_TRAP_MAYBE(); break; /* 0x2F - Reserved, Caught by default */ @@ -519,17 +526,17 @@ asmlinkage void trap_c(struct pt_regs *fp) case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): info.si_code = BUS_ADRALN; sig = SIGBUS; - verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE)); + strerror = KERN_NOTICE HWC_x2(KERN_NOTICE); break; /* External Memory Addressing Error */ case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): info.si_code = BUS_ADRERR; sig = SIGBUS; - verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE)); + strerror = KERN_NOTICE HWC_x3(KERN_NOTICE); break; /* Performance Monitor Overflow */ case (SEQSTAT_HWERRCAUSE_PERF_FLOW): - verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE)); + strerror = KERN_NOTICE HWC_x12(KERN_NOTICE); break; /* RAISE 5 instruction */ case (SEQSTAT_HWERRCAUSE_RAISE_5): @@ -546,7 +553,6 @@ asmlinkage void 
trap_c(struct pt_regs *fp) * if we get here we hit a reserved one, so panic */ default: - oops_in_progress = 1; info.si_code = ILL_ILLPARAOP; sig = SIGILL; verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n", @@ -557,6 +563,16 @@ asmlinkage void trap_c(struct pt_regs *fp) BUG_ON(sig == 0); + /* If the fault was caused by a kernel thread, or interrupt handler + * we will kernel panic, so the system reboots. + */ + if (kernel_mode_regs(fp) || (current && !current->mm)) { + console_verbose(); + oops_in_progress = 1; + if (strerror) + verbose_printk(strerror); + } + if (sig != SIGTRAP) { dump_bfin_process(fp); dump_bfin_mem(fp); @@ -606,8 +622,8 @@ asmlinkage void trap_c(struct pt_regs *fp) if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) fp->pc = SAFE_USER_INSTRUCTION; + traps_done: trace_buffer_restore(j); - return; } /* Typical exception handling routines */ @@ -792,6 +808,18 @@ void dump_bfin_trace_buffer(void) } EXPORT_SYMBOL(dump_bfin_trace_buffer); +#ifdef CONFIG_BUG +int is_valid_bugaddr(unsigned long addr) +{ + unsigned short opcode; + + if (!get_instruction(&opcode, (unsigned short *)addr)) + return 0; + + return opcode == BFIN_BUG_OPCODE; +} +#endif + /* * Checks to see if the address pointed to is either a * 16-bit CALL instruction, or a 32-bit CALL instruction diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 8b67167cb4f..6ac307ca0d8 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -54,6 +54,7 @@ SECTIONS SCHED_TEXT #endif LOCK_TEXT + IRQENTRY_TEXT KPROBES_TEXT *(.text.*) *(.fixup) @@ -166,6 +167,20 @@ SECTIONS } PERCPU(4) SECURITY_INIT + + /* we have to discard exit text and such at runtime, not link time, to + * handle embedded cross-section references (alt instructions, bug + * table, eh_frame, etc...) + */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + .init.ramfs : { . = ALIGN(4); @@ -264,8 +279,6 @@ SECTIONS /DISCARD/ : { - EXIT_TEXT - EXIT_DATA *(.exitcall.exit) } } diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c index 762a7f02970..cd605e7d851 100644 --- a/arch/blackfin/lib/checksum.c +++ b/arch/blackfin/lib/checksum.c @@ -116,6 +116,7 @@ __sum16 ip_compute_csum(const void *buff, int len) { return (__force __sum16)~do_csum(buff, len); } +EXPORT_SYMBOL(ip_compute_csum); /* * copy from fs while checksumming, otherwise like csum_partial @@ -130,6 +131,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, memcpy(dst, (__force void *)src, len); return csum_partial(dst, len, sum); } +EXPORT_SYMBOL(csum_partial_copy_from_user); /* * copy from ds while checksumming, otherwise like csum_partial diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c index 62bba09bcce..1382f038235 100644 --- a/arch/blackfin/mach-bf518/boards/ezbrd.c +++ b/arch/blackfin/mach-bf518/boards/ezbrd.c @@ -246,7 +246,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ - .chip_select = 1, /* Framework chip select. 
On STAMP537 it is SPISSEL1*/ + .chip_select = 2, /* On BF518F-EZBRD it's SPI0_SSEL2 */ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, @@ -369,6 +369,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI0, .end = CH_SPI0, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI0, + .end = IRQ_SPI0, .flags = IORESOURCE_IRQ, }, }; @@ -399,6 +404,11 @@ static struct resource bfin_spi1_resource[] = { [1] = { .start = CH_SPI1, .end = CH_SPI1, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI1, + .end = IRQ_SPI1, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index 6d6f9effa0b..1eaf27ff722 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c @@ -664,6 +664,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index 1435c5d38cd..9f9c0005dcf 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c @@ -467,6 +467,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 147edd1eb1a..3e5b7db6b06 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -723,6 +723,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c index 895f213ea45..38cf8ffd6d7 100644 --- a/arch/blackfin/mach-bf533/boards/H8606.c +++ b/arch/blackfin/mach-bf533/boards/H8606.c @@ -266,6 +266,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index 0765872a8ad..9ecdc361fa6 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c @@ -162,6 +162,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c index a727e538fa2..1443e92d8b6 100644 --- a/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -160,6 +160,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index 842f1c9c239..89a5ec4ca04 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ 
b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -196,6 +196,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index e19c565ade1..a68ade8a3ca 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -299,6 +299,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c index 4fee1967312..2a87d1cfcd0 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c @@ -182,8 +182,13 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, - } + }, }; /* SPI controller data */ diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c index 3c159819e55..399f81da7b9 100644 --- a/arch/blackfin/mach-bf537/boards/minotaur.c +++ b/arch/blackfin/mach-bf537/boards/minotaur.c @@ -184,6 +184,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index 26707ce39f2..838240f151f 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c @@ -398,8 +398,13 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, - } + }, }; /* SPI controller data */ diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index dfb5036f8a6..ff7228caa7d 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1345,7 +1345,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) { I2C_BOARD_INFO("pmic-adp5520", 0x32), - .irq = IRQ_PF7, + .irq = IRQ_PG0, .platform_data = (void *)&adp5520_pdev_data, }, #endif diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index 28057459120..e523e6e610d 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c @@ -182,6 +182,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index e37cb937888..57695b4c3c0 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -352,6 +352,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI0, .end = CH_SPI0, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI0, + .end = IRQ_SPI0, .flags = IORESOURCE_IRQ, } 
}; @@ -366,6 +371,11 @@ static struct resource bfin_spi1_resource[] = { [1] = { .start = CH_SPI1, .end = CH_SPI1, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI1, + .end = IRQ_SPI1, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c index f53ad682530..f5a3c30a41b 100644 --- a/arch/blackfin/mach-bf548/boards/cm_bf548.c +++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c @@ -612,6 +612,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI0, .end = CH_SPI0, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI0, + .end = IRQ_SPI0, .flags = IORESOURCE_IRQ, } }; @@ -626,6 +631,11 @@ static struct resource bfin_spi1_resource[] = { [1] = { .start = CH_SPI1, .end = CH_SPI1, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI1, + .end = IRQ_SPI1, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index add5a17452c..805a57b5e65 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -396,6 +396,8 @@ static struct platform_device bfin_sir3_device = { #endif #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) +#include <linux/smsc911x.h> + static struct resource smsc911x_resources[] = { { .name = "smsc911x-memory", @@ -409,11 +411,22 @@ static struct resource smsc911x_resources[] = { .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; + +static struct smsc911x_platform_config smsc911x_config = { + .flags = SMSC911X_USE_32BIT, + .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, + .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + static struct platform_device smsc911x_device = { .name = "smsc911x", .id = 0, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, + .dev = { + .platform_data = &smsc911x_config, + }, }; #endif @@ -741,6 +754,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI0, .end = CH_SPI0, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI0, + .end = IRQ_SPI0, .flags = IORESOURCE_IRQ, } }; @@ -755,6 +773,11 @@ static struct resource bfin_spi1_resource[] = { [1] = { .start = CH_SPI1, .end = CH_SPI1, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI1, + .end = IRQ_SPI1, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index 0dd9685e5d5..0c9d72c5f5b 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -177,8 +177,13 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, - } + }, }; /* SPI controller data */ diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 0e2178a1aec..b5ef7ff7b7b 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -304,6 +304,11 @@ static struct resource bfin_spi0_resource[] = { [1] = { .start = CH_SPI, .end = CH_SPI, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI, + .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c index e6ab1f81512..b59ce3cb380 100644 --- a/arch/blackfin/mach-common/cache-c.c +++ 
b/arch/blackfin/mach-common/cache-c.c @@ -16,9 +16,21 @@ void blackfin_invalidate_entire_dcache(void) { u32 dmem = bfin_read_DMEM_CONTROL(); - SSYNC(); bfin_write_DMEM_CONTROL(dmem & ~0xc); SSYNC(); bfin_write_DMEM_CONTROL(dmem); SSYNC(); } + +/* Invalidate the Entire Instruction cache by + * clearing IMC bit + */ +void blackfin_invalidate_entire_icache(void) +{ + u32 imem = bfin_read_IMEM_CONTROL(); + bfin_write_IMEM_CONTROL(imem & ~0x4); + SSYNC(); + bfin_write_IMEM_CONTROL(imem); + SSYNC(); +} + diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index da0558ad1b1..31fa313e81c 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -42,6 +42,7 @@ #include <asm/thread_info.h> /* TIF_NEED_RESCHED */ #include <asm/asm-offsets.h> #include <asm/trace.h> +#include <asm/traps.h> #include <asm/context.S> @@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261) if !cc jump _bfin_return_from_exception; /* fall through */ R7 = P4; - R6 = 0x26; /* Data CPLB Miss */ + R6 = VEC_CPLB_M; /* Data CPLB Miss */ cc = R6 == R7; if cc jump _ex_dcplb_miss (BP); - R6 = 0x23; /* Data CPLB Miss */ +#ifdef CONFIG_MPU + R6 = VEC_CPLB_VL; /* Data CPLB Violation */ cc = R6 == R7; if cc jump _ex_dcplb_viol (BP); - /* Handle 0x23 Data CPLB Protection Violation +#endif + /* Handle Data CPLB Protection Violation * and Data CPLB Multiple Hits - Linux Trap Zero */ jump _ex_trap_c; @@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception) r6.l = lo(SEQSTAT_EXCAUSE); r6.h = hi(SEQSTAT_EXCAUSE); r7 = r7 & r6; - r6 = 0x25; + r6 = VEC_UNCOV; CC = R7 == R6; if CC JUMP _double_fault; #endif @@ -1605,6 +1608,7 @@ ENTRY(_sys_call_table) .long _sys_inotify_init1 /* 365 */ .long _sys_preadv .long _sys_pwritev + .long _sys_rt_tgsigqueueinfo .rept NR_syscalls-(.-_sys_call_table)/4 .long _sys_ni_syscall diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 3b8ebaee77f..61840059dfa 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) static irqreturn_t ipi_handler(int irq, void *dev_instance) { - struct ipi_message *msg, *mg; + struct ipi_message *msg; struct ipi_message_queue *msg_queue; unsigned int cpu = smp_processor_id(); @@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance) msg_queue->count++; spin_lock(&msg_queue->lock); - list_for_each_entry_safe(msg, mg, &msg_queue->head, list) { + while (!list_empty(&msg_queue->head)) { + msg = list_entry(msg_queue->head.next, typeof(*msg), list); list_del(&msg->list); switch (msg->type) { case BFIN_IPI_RESCHEDULE: @@ -221,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait) for_each_cpu_mask(cpu, callmap) { msg_queue = &per_cpu(ipi_msg_queue, cpu); spin_lock_irqsave(&msg_queue->lock, flags); - list_add(&msg->list, &msg_queue->head); + list_add_tail(&msg->list, &msg_queue->head); spin_unlock_irqrestore(&msg_queue->lock, flags); platform_send_ipi_cpu(cpu); } @@ -261,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, msg_queue = &per_cpu(ipi_msg_queue, cpu); spin_lock_irqsave(&msg_queue->lock, flags); - list_add(&msg->list, &msg_queue->head); + list_add_tail(&msg->list, &msg_queue->head); spin_unlock_irqrestore(&msg_queue->lock, flags); platform_send_ipi_cpu(cpu); @@ -292,7 +293,7 @@ void smp_send_reschedule(int cpu) msg_queue = &per_cpu(ipi_msg_queue, cpu); spin_lock_irqsave(&msg_queue->lock, flags); 
- list_add(&msg->list, &msg_queue->head); + list_add_tail(&msg->list, &msg_queue->head); spin_unlock_irqrestore(&msg_queue->lock, flags); platform_send_ipi_cpu(cpu); @@ -320,7 +321,7 @@ void smp_send_stop(void) for_each_cpu_mask(cpu, callmap) { msg_queue = &per_cpu(ipi_msg_queue, cpu); spin_lock_irqsave(&msg_queue->lock, flags); - list_add(&msg->list, &msg_queue->head); + list_add_tail(&msg->list, &msg_queue->head); spin_unlock_irqrestore(&msg_queue->lock, flags); platform_send_ipi_cpu(cpu); } @@ -468,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end) } EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); +#ifdef __ARCH_SYNC_CORE_ICACHE +void resync_core_icache(void) +{ + unsigned int cpu = get_cpu(); + blackfin_invalidate_entire_icache(); + ++per_cpu(cpu_data, cpu).icache_invld_count; + put_cpu(); +} +EXPORT_SYMBOL(resync_core_icache); +#endif + #ifdef __ARCH_SYNC_CORE_DCACHE unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); diff --git a/arch/cris/include/asm/kmap_types.h b/arch/cris/include/asm/kmap_types.h index 492988cb907..d2d643c4ea5 100644 --- a/arch/cris/include/asm/kmap_types.h +++ b/arch/cris/include/asm/kmap_types.h @@ -5,21 +5,6 @@ * is actually used on cris. */ -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 4df0b320d52..51dcd04d277 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -38,10 +38,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. * diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c index 29429a8b7f6..1d3df1d9495 100644 --- a/arch/frv/kernel/init_task.c +++ b/arch/frv/kernel/init_task.c @@ -12,10 +12,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. * diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h index 1ec8a342712..be12a716011 100644 --- a/arch/h8300/include/asm/kmap_types.h +++ b/arch/h8300/include/asm/kmap_types.h @@ -1,21 +1,6 @@ #ifndef _ASM_H8300_KMAP_TYPES_H #define _ASM_H8300_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c index cb5dc552da9..089c65ed6eb 100644 --- a/arch/h8300/kernel/init_task.c +++ b/arch/h8300/kernel/init_task.c @@ -14,10 +14,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. 
* diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 56ceb68eb99..fe63b2dc9d0 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp #ifdef CONFIG_NUMA { struct page *page; - page = alloc_pages_node(ioc->node == MAX_NUMNODES ? + page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ? numa_node_id() : ioc->node, flags, get_order(size)); diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h index 5d1658aa2b3..05d5f999610 100644 --- a/arch/ia64/include/asm/kmap_types.h +++ b/arch/ia64/include/asm/kmap_types.h @@ -1,30 +1,12 @@ #ifndef _ASM_IA64_KMAP_TYPES_H #define _ASM_IA64_KMAP_TYPES_H - #ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif /* _ASM_IA64_KMAP_TYPES_H */ diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index 5b0e830c6f3..c475fc281be 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -19,10 +19,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. * diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 8f33a884042..5b17bd40227 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = page_address(alloc_pages_node(numa_node_id(), - GFP_KERNEL, get_order(sz))); + data = __get_free_pages(GFP_KERNEL, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 8a06dc48059..bdc176cb5e8 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -5595,7 +5595,7 @@ pfm_interrupt_handler(int irq, void *arg) (*pfm_alt_intr_handler->handler)(irq, arg, regs); } - put_cpu_no_resched(); + put_cpu(); return IRQ_HANDLED; } diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index 8eff8c1d40a..6ba72ab42fc 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c @@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) /* attempt to allocate a granule's worth of cached memory pages */ - page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, IA64_GRANULE_SHIFT-PAGE_SHIFT); if (!page) { mutex_unlock(&uc_pool->add_chunk_mutex); diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c index e95d5ad9285..c99a41e29fe 100644 --- a/arch/ia64/mm/extable.c +++ b/arch/ia64/mm/extable.c @@ -8,7 +8,7 @@ #include <linux/sort.h> #include <asm/uaccess.h> -#include <asm/module.h> +#include <linux/module.h> static int cmp_ex(const void *a, const void *b) { @@ -55,7 +55,7 @@ void sort_extable (struct exception_table_entry *start, static inline 
unsigned long ex_to_addr(const struct exception_table_entry *x) { - return (unsigned long)&x->insn + x->insn; + return (unsigned long)&x->addr + x->addr; } #ifdef CONFIG_MODULES diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index d876423e4e7..98b684928e1 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, */ node = pcibus_to_node(pdev->bus); if (likely(node >=0)) { - struct page *p = alloc_pages_node(node, flags, get_order(size)); + struct page *p = alloc_pages_exact_node(node, + flags, get_order(size)); if (likely(p)) cpuaddr = page_address(p); diff --git a/arch/m32r/include/asm/kmap_types.h b/arch/m32r/include/asm/kmap_types.h index fa94dc6410e..4cdb5e3a06b 100644 --- a/arch/m32r/include/asm/kmap_types.h +++ b/arch/m32r/include/asm/kmap_types.h @@ -2,28 +2,11 @@ #define __M32R_KMAP_TYPES_H #ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif /* __M32R_KMAP_TYPES_H */ diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c index 016885c6f26..fce57e5d3f9 100644 --- a/arch/m32r/kernel/init_task.c +++ b/arch/m32r/kernel/init_task.c @@ -13,10 +13,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. * diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c index 7daf897292c..b7a78ad429b 100644 --- a/arch/m32r/mm/discontig.c +++ b/arch/m32r/mm/discontig.c @@ -154,9 +154,9 @@ unsigned long __init zone_sizes_init(void) * Use all area of internal RAM. 
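Editor's note on the km_type deletions above and below (cris, h8300, ia64, m32r, m68k, microblaze, ...): each architecture now pulls the enum from the shared asm-generic header and only defines __WITH_KM_FENCE when CONFIG_DEBUG_HIGHMEM wants the fence slots. A rough sketch of the pattern that generic header follows, not a verbatim copy; the remaining slots are elided.

#ifdef __WITH_KM_FENCE
# define KMAP_D(n) __KM_FENCE_##n ,
#else
# define KMAP_D(n)
#endif

enum km_type {
KMAP_D(0)	KM_BOUNCE_READ,
KMAP_D(1)	KM_SKB_SUNRPC_DATA,
	/* ... intermediate slots elided in this sketch ... */
KMAP_D(13)	KM_TYPE_NR
};

#undef KMAP_D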
* see __alloc_pages() */ - NODE_DATA(1)->node_zones->pages_min = 0; - NODE_DATA(1)->node_zones->pages_low = 0; - NODE_DATA(1)->node_zones->pages_high = 0; + NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0; + NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0; + NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0; return holes; } diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c index 98138b4e922..922fdfdadea 100644 --- a/arch/m32r/platforms/m32104ut/setup.c +++ b/arch/m32r/platforms/m32104ut/setup.c @@ -63,7 +63,7 @@ static void shutdown_m32104ut_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type m32104ut_irq_type = +static struct irq_chip m32104ut_irq_type = { .typename = "M32104UT-IRQ", .startup = startup_m32104ut_irq, diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c index 77b0ae9379e..9c1bc7487c1 100644 --- a/arch/m32r/platforms/m32700ut/setup.c +++ b/arch/m32r/platforms/m32700ut/setup.c @@ -69,7 +69,7 @@ static void shutdown_m32700ut_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type m32700ut_irq_type = +static struct irq_chip m32700ut_irq_type = { .typename = "M32700UT-IRQ", .startup = startup_m32700ut_irq, @@ -146,7 +146,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type m32700ut_pld_irq_type = +static struct irq_chip m32700ut_pld_irq_type = { .typename = "M32700UT-PLD-IRQ", .startup = startup_m32700ut_pld_irq, @@ -215,7 +215,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type m32700ut_lanpld_irq_type = +static struct irq_chip m32700ut_lanpld_irq_type = { .typename = "M32700UT-PLD-LAN-IRQ", .startup = startup_m32700ut_lanpld_irq, @@ -284,7 +284,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type m32700ut_lcdpld_irq_type = +static struct irq_chip m32700ut_lcdpld_irq_type = { .typename = "M32700UT-PLD-LCD-IRQ", .startup = startup_m32700ut_lcdpld_irq, diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c index 3ec087ff221..fb4b17799b6 100644 --- a/arch/m32r/platforms/mappi/setup.c +++ b/arch/m32r/platforms/mappi/setup.c @@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type mappi_irq_type = +static struct irq_chip mappi_irq_type = { .typename = "MAPPI-IRQ", .startup = startup_mappi_irq, diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c index d87969c6356..6a65eda0a05 100644 --- a/arch/m32r/platforms/mappi2/setup.c +++ b/arch/m32r/platforms/mappi2/setup.c @@ -70,7 +70,7 @@ static void shutdown_mappi2_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type mappi2_irq_type = +static struct irq_chip mappi2_irq_type = { .typename = "MAPPI2-IRQ", .startup = startup_mappi2_irq, diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c index 785b4bd6d9f..9c337aeac94 100644 --- a/arch/m32r/platforms/mappi3/setup.c +++ b/arch/m32r/platforms/mappi3/setup.c @@ -70,7 +70,7 @@ static void shutdown_mappi3_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type mappi3_irq_type = +static struct irq_chip mappi3_irq_type = { .typename = "MAPPI3-IRQ", .startup = startup_mappi3_irq, diff --git 
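Editor's note on the m32r discontig hunk above: the separate pages_min/pages_low/pages_high zone fields become a single watermark[] array indexed by WMARK_MIN/WMARK_LOW/WMARK_HIGH. The sketch below is illustrative only (demo_* names, not the real mm headers); the kernel provides similar accessor macros in the min_wmark_pages() style.

enum demo_zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

struct demo_zone {
	unsigned long watermark[NR_WMARK];	/* replaces pages_min/low/high */
	/* ... */
};

#define demo_min_wmark_pages(z)		((z)->watermark[WMARK_MIN])
#define demo_low_wmark_pages(z)		((z)->watermark[WMARK_LOW])
#define demo_high_wmark_pages(z)	((z)->watermark[WMARK_HIGH])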
a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c index 6faa5db68e9..ed865741c38 100644 --- a/arch/m32r/platforms/oaks32r/setup.c +++ b/arch/m32r/platforms/oaks32r/setup.c @@ -61,7 +61,7 @@ static void shutdown_oaks32r_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type oaks32r_irq_type = +static struct irq_chip oaks32r_irq_type = { .typename = "OAKS32R-IRQ", .startup = startup_oaks32r_irq, diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c index fab13fd8542..80d68065701 100644 --- a/arch/m32r/platforms/opsput/setup.c +++ b/arch/m32r/platforms/opsput/setup.c @@ -70,7 +70,7 @@ static void shutdown_opsput_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type opsput_irq_type = +static struct irq_chip opsput_irq_type = { .typename = "OPSPUT-IRQ", .startup = startup_opsput_irq, @@ -147,7 +147,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type opsput_pld_irq_type = +static struct irq_chip opsput_pld_irq_type = { .typename = "OPSPUT-PLD-IRQ", .startup = startup_opsput_pld_irq, @@ -216,7 +216,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type opsput_lanpld_irq_type = +static struct irq_chip opsput_lanpld_irq_type = { .typename = "OPSPUT-PLD-LAN-IRQ", .startup = startup_opsput_lanpld_irq, @@ -285,7 +285,7 @@ static void shutdown_opsput_lcdpld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type opsput_lcdpld_irq_type = +static struct irq_chip opsput_lcdpld_irq_type = { "OPSPUT-PLD-LCD-IRQ", startup_opsput_lcdpld_irq, diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c index 89588d649eb..757302660af 100644 --- a/arch/m32r/platforms/usrv/setup.c +++ b/arch/m32r/platforms/usrv/setup.c @@ -61,7 +61,7 @@ static void shutdown_mappi_irq(unsigned int irq) outl(M32R_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type mappi_irq_type = +static struct irq_chip mappi_irq_type = { .typename = "M32700-IRQ", .startup = startup_mappi_irq, @@ -134,7 +134,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq) outw(PLD_ICUCR_ILEVEL7, port); } -static struct hw_interrupt_type m32700ut_pld_irq_type = +static struct irq_chip m32700ut_pld_irq_type = { .typename = "USRV-PLD-IRQ", .startup = startup_m32700ut_pld_irq, diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h index c843c63d380..3413cc1390e 100644 --- a/arch/m68k/include/asm/kmap_types.h +++ b/arch/m68k/include/asm/kmap_types.h @@ -1,21 +1,6 @@ #ifndef __ASM_M68K_KMAP_TYPES_H #define __ASM_M68K_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif /* __ASM_M68K_KMAP_TYPES_H */ diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index ec37fb56c12..72bad65dba3 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -42,10 +42,6 @@ */ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - union thread_union init_thread_union 
__attribute__((section(".data.init_task"), aligned(THREAD_SIZE))) = { INIT_THREAD_INFO(init_task) }; diff --git a/arch/m68knommu/kernel/init_task.c b/arch/m68knommu/kernel/init_task.c index fe282de1d59..45e97a207fe 100644 --- a/arch/m68knommu/kernel/init_task.c +++ b/arch/m68knommu/kernel/init_task.c @@ -14,10 +14,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. * diff --git a/arch/microblaze/include/asm/kmap_types.h b/arch/microblaze/include/asm/kmap_types.h index 4d7e222f5dd..25975252d83 100644 --- a/arch/microblaze/include/asm/kmap_types.h +++ b/arch/microblaze/include/asm/kmap_types.h @@ -1,29 +1,6 @@ -/* - * Copyright (C) 2006 Atmark Techno, Inc. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - #ifndef _ASM_MICROBLAZE_KMAP_TYPES_H #define _ASM_MICROBLAZE_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR, -}; +#include <asm-generic/kmap_types.h> #endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 25f3b0a11ca..b29f0280d71 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -618,6 +618,8 @@ config CAVIUM_OCTEON_REFERENCE_BOARD select SYS_HAS_EARLY_PRINTK select SYS_HAS_CPU_CAVIUM_OCTEON select SWAP_IO_SPACE + select HW_HAS_PCI + select ARCH_SUPPORTS_MSI help This option supports all of the Octeon reference boards from Cavium Networks. It builds a kernel that dynamically determines the Octeon @@ -851,6 +853,11 @@ config SYS_SUPPORTS_BIG_ENDIAN config SYS_SUPPORTS_LITTLE_ENDIAN bool +config SYS_SUPPORTS_HUGETLBFS + bool + depends on CPU_SUPPORTS_HUGEPAGES && 64BIT + default y + config IRQ_CPU bool @@ -1055,6 +1062,7 @@ config CPU_MIPS64_R1 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES help Choose this option to build a kernel for release 1 or later of the MIPS64 architecture. Many modern embedded systems with a 64-bit @@ -1074,6 +1082,7 @@ config CPU_MIPS64_R2 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES help Choose this option to build a kernel for release 2 or later of the MIPS64 architecture. Many modern embedded systems with a 64-bit @@ -1160,6 +1169,7 @@ config CPU_R5500 select CPU_HAS_LLSC select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL + select CPU_SUPPORTS_HUGEPAGES help NEC VR5500 and VR5500A series processors implement 64-bit MIPS IV instruction set. @@ -1245,6 +1255,7 @@ config CPU_CAVIUM_OCTEON select WEAK_ORDERING select WEAK_REORDERING_BEYOND_LLSC select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES help The Cavium Octeon processor is a highly integrated chip containing many ethernet hardware widgets for networking tasks. The processor @@ -1364,6 +1375,8 @@ config CPU_SUPPORTS_32BIT_KERNEL bool config CPU_SUPPORTS_64BIT_KERNEL bool +config CPU_SUPPORTS_HUGEPAGES + bool # # Set to y for ptrace access to watch registers. 
@@ -2121,6 +2134,10 @@ endmenu menu "Power management options" +config ARCH_HIBERNATION_POSSIBLE + def_bool y + depends on !SMP + config ARCH_SUSPEND_POSSIBLE def_bool y depends on !SMP diff --git a/arch/mips/Makefile b/arch/mips/Makefile index c4cae9e6b80..807572a6a4d 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -167,7 +167,6 @@ libs-$(CONFIG_ARC) += arch/mips/fw/arc/ libs-$(CONFIG_CFE) += arch/mips/fw/cfe/ libs-$(CONFIG_SNIPROM) += arch/mips/fw/sni/ libs-y += arch/mips/fw/lib/ -libs-$(CONFIG_SIBYTE_CFE) += arch/mips/sibyte/cfe/ # # Board-dependent options and extra files @@ -184,7 +183,6 @@ load-$(CONFIG_MACH_JAZZ) += 0xffffffff80080000 # Common Alchemy Au1x00 stuff # core-$(CONFIG_SOC_AU1X00) += arch/mips/alchemy/common/ -cflags-$(CONFIG_SOC_AU1X00) += -I$(srctree)/arch/mips/include/asm/mach-au1x00 # # AMD Alchemy Pb1000 eval board @@ -282,6 +280,10 @@ load-$(CONFIG_MIPS_MTX1) += 0xffffffff80100000 libs-$(CONFIG_MIPS_XXS1500) += arch/mips/alchemy/xxs1500/ load-$(CONFIG_MIPS_XXS1500) += 0xffffffff80100000 +# must be last for Alchemy systems for GPIO to work properly +cflags-$(CONFIG_SOC_AU1X00) += -I$(srctree)/arch/mips/include/asm/mach-au1x00 + + # # Cobalt Server # @@ -675,6 +677,9 @@ core-y += arch/mips/kernel/ arch/mips/mm/ arch/mips/math-emu/ drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/ +# suspend and hibernation support +drivers-$(CONFIG_PM) += arch/mips/power/ + ifdef CONFIG_LASAT rom.bin rom.sw: vmlinux $(Q)$(MAKE) $(build)=arch/mips/lasat/image $@ diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig index 8128aebfb15..00b498e97c8 100644 --- a/arch/mips/alchemy/Kconfig +++ b/arch/mips/alchemy/Kconfig @@ -1,3 +1,14 @@ +# au1000-style gpio +config ALCHEMY_GPIO_AU1000 + bool + +# select this in your board config if you don't want to use the gpio +# namespace as documented in the manuals. In this case however you need +# to create the necessary gpio_* functions in your board code/headers! +# see arch/mips/include/asm/mach-au1x00/gpio.h for more information. 
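Editor's note on the Kconfig help text above: a board that selects the indirect GPIO option that follows opts out of the Alchemy GPIO namespace and must supply its own gpio_* functions in board code or headers. A hypothetical sketch of what such a board header might look like; the myboard_* helpers are invented for illustration and are not part of this patch.

#ifndef _MYBOARD_GPIO_H
#define _MYBOARD_GPIO_H

/* Implemented in the board's own code (hypothetical). */
int myboard_gpio_get(unsigned int gpio);
void myboard_gpio_set(unsigned int gpio, int value);

static inline int gpio_get_value(unsigned gpio)
{
	return myboard_gpio_get(gpio);
}

static inline void gpio_set_value(unsigned gpio, int value)
{
	myboard_gpio_set(gpio, value);
}

static inline int gpio_direction_input(unsigned gpio)
{
	return 0;	/* pin direction assumed fixed by board firmware */
}

static inline int gpio_direction_output(unsigned gpio, int value)
{
	gpio_set_value(gpio, value);
	return 0;
}

#endif /* _MYBOARD_GPIO_H */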
+config ALCHEMY_GPIO_INDIRECT + def_bool n + choice prompt "Machine type" depends on MACH_ALCHEMY @@ -108,22 +119,27 @@ endchoice config SOC_AU1000 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1100 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1500 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1550 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1200 bool select SOC_AU1X00 + select ALCHEMY_GPIO_AU1000 config SOC_AU1X00 bool @@ -134,4 +150,5 @@ config SOC_AU1X00 select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_APM_EMULATION - select ARCH_REQUIRE_GPIOLIB + select GENERIC_GPIO + select ARCH_WANT_OPTIONAL_GPIOLIB diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile index d50d4764eaf..b67fb512529 100644 --- a/arch/mips/alchemy/common/Makefile +++ b/arch/mips/alchemy/common/Makefile @@ -7,7 +7,14 @@ obj-y += prom.o irq.o puts.o time.o reset.o \ clocks.o platform.o power.o setup.o \ - sleeper.o dma.o dbdma.o gpio.o + sleeper.o dma.o dbdma.o + +# optional gpiolib support +ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),) + ifeq ($(CONFIG_GPIOLIB),y) + obj-$(CONFIG_ALCHEMY_GPIO_AU1000) += gpiolib-au1000.o + endif +endif obj-$(CONFIG_PCI) += pci.o diff --git a/arch/mips/alchemy/common/gpio.c b/arch/mips/alchemy/common/gpio.c deleted file mode 100644 index 91a9c4436c3..00000000000 --- a/arch/mips/alchemy/common/gpio.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org> - * Architecture specific GPIO support - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Notes : - * au1000 SoC have only one GPIO line : GPIO1 - * others have a second one : GPIO2 - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/platform_device.h> -#include <linux/gpio.h> - -#include <asm/mach-au1x00/au1000.h> -#include <asm/gpio.h> - -struct au1000_gpio_chip { - struct gpio_chip chip; - void __iomem *regbase; -}; - -#if !defined(CONFIG_SOC_AU1000) -static int au1000_gpio2_get(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - return readl(gpch->regbase + AU1000_GPIO2_ST) & mask; -} - -static void au1000_gpio2_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = ((GPIO2_OUT_EN_MASK << offset) | (!!value << offset)); - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - local_irq_save(flags); - writel(mask, gpch->regbase + AU1000_GPIO2_OUT); - local_irq_restore(flags); -} - -static int au1000_gpio2_direction_input(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - u32 tmp; - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - local_irq_save(flags); - tmp = readl(gpch->regbase + AU1000_GPIO2_DIR); - tmp &= ~mask; - writel(tmp, gpch->regbase + AU1000_GPIO2_DIR); - local_irq_restore(flags); - - return 0; -} - -static int au1000_gpio2_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = 1 << offset; - u32 out_mask = ((GPIO2_OUT_EN_MASK << offset) | (!!value << offset)); - u32 tmp; - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - local_irq_save(flags); - tmp = readl(gpch->regbase + AU1000_GPIO2_DIR); - tmp |= mask; - writel(tmp, gpch->regbase + AU1000_GPIO2_DIR); - writel(out_mask, gpch->regbase + AU1000_GPIO2_OUT); - local_irq_restore(flags); - - return 0; -} -#endif /* !defined(CONFIG_SOC_AU1000) */ - -static int au1000_gpio1_get(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - return readl(gpch->regbase + AU1000_GPIO1_ST) & mask; -} - -static void au1000_gpio1_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = 1 << offset; - u32 reg_offset; - struct au1000_gpio_chip *gpch; - unsigned long flags; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - if (value) - reg_offset = AU1000_GPIO1_OUT; - else - reg_offset = AU1000_GPIO1_CLR; - - local_irq_save(flags); - writel(mask, gpch->regbase + reg_offset); - local_irq_restore(flags); -} - -static int au1000_gpio1_direction_input(struct gpio_chip *chip, unsigned offset) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - writel(mask, gpch->regbase + AU1000_GPIO1_ST); - - return 0; -} - -static int au1000_gpio1_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - u32 mask = 1 << offset; - struct au1000_gpio_chip *gpch; - - gpch = container_of(chip, struct au1000_gpio_chip, chip); - - writel(mask, gpch->regbase + AU1000_GPIO1_TRI_OUT); - au1000_gpio1_set(chip, offset, value); - - return 0; -} - -struct au1000_gpio_chip au1000_gpio_chip[] = { - [0] = { - .regbase = (void __iomem *)SYS_BASE, - .chip = { - .label = "au1000-gpio1", - 
.direction_input = au1000_gpio1_direction_input, - .direction_output = au1000_gpio1_direction_output, - .get = au1000_gpio1_get, - .set = au1000_gpio1_set, - .base = 0, - .ngpio = 32, - }, - }, -#if !defined(CONFIG_SOC_AU1000) - [1] = { - .regbase = (void __iomem *)GPIO2_BASE, - .chip = { - .label = "au1000-gpio2", - .direction_input = au1000_gpio2_direction_input, - .direction_output = au1000_gpio2_direction_output, - .get = au1000_gpio2_get, - .set = au1000_gpio2_set, - .base = AU1XXX_GPIO_BASE, - .ngpio = 32, - }, - }, -#endif -}; - -static int __init au1000_gpio_init(void) -{ - gpiochip_add(&au1000_gpio_chip[0].chip); -#if !defined(CONFIG_SOC_AU1000) - gpiochip_add(&au1000_gpio_chip[1].chip); -#endif - - return 0; -} -arch_initcall(au1000_gpio_init); - diff --git a/arch/mips/alchemy/common/gpiolib-au1000.c b/arch/mips/alchemy/common/gpiolib-au1000.c new file mode 100644 index 00000000000..1bfa91f939f --- /dev/null +++ b/arch/mips/alchemy/common/gpiolib-au1000.c @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org> + * GPIOLIB support for Au1000, Au1500, Au1100, Au1550 and Au12x0. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Notes : + * au1000 SoC have only one GPIO block : GPIO1 + * Au1100, Au15x0, Au12x0 have a second one : GPIO2 + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> + +#include <asm/mach-au1x00/au1000.h> +#include <asm/mach-au1x00/gpio.h> + +#if !defined(CONFIG_SOC_AU1000) +static int gpio2_get(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE); +} + +static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value) +{ + alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value); +} + +static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE); +} + +static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE, + value); +} + +static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE); +} +#endif /* !defined(CONFIG_SOC_AU1000) */ + +static int gpio1_get(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE); +} + +static void gpio1_set(struct gpio_chip *chip, + unsigned offset, int value) +{ + alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value); +} + +static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE); +} + +static int gpio1_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE, + value); +} + +static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset) +{ + return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE); +} + +struct gpio_chip alchemy_gpio_chip[] = { + [0] = { + .label = "alchemy-gpio1", + .direction_input = gpio1_direction_input, + .direction_output = gpio1_direction_output, + .get = gpio1_get, + .set = gpio1_set, + .to_irq = gpio1_to_irq, + .base = ALCHEMY_GPIO1_BASE, + .ngpio = ALCHEMY_GPIO1_NUM, + }, +#if !defined(CONFIG_SOC_AU1000) + [1] = { + .label = "alchemy-gpio2", + .direction_input = gpio2_direction_input, + .direction_output = gpio2_direction_output, + .get = gpio2_get, + .set = gpio2_set, + .to_irq = gpio2_to_irq, + .base = ALCHEMY_GPIO2_BASE, + .ngpio = ALCHEMY_GPIO2_NUM, + }, +#endif +}; + +static int __init alchemy_gpiolib_init(void) +{ + gpiochip_add(&alchemy_gpio_chip[0]); +#if !defined(CONFIG_SOC_AU1000) + gpiochip_add(&alchemy_gpio_chip[1]); +#endif + + return 0; +} +arch_initcall(alchemy_gpiolib_init); diff --git a/arch/mips/alchemy/common/reset.c b/arch/mips/alchemy/common/reset.c index 0191c936cb5..4791011e8f9 100644 --- a/arch/mips/alchemy/common/reset.c +++ b/arch/mips/alchemy/common/reset.c @@ -27,8 +27,9 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
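Editor's note: once the gpiolib-au1000.c chips above are registered, drivers and board code reach Alchemy pins through the generic gpiolib consumer API, as the board_setup.c conversions further below illustrate. A minimal consumer-side sketch; GPIO 211 is used only as an example pin (the MTX-1 green LED in the board code below) and the "demo-led" label is arbitrary.

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init demo_led_init(void)
{
	int ret;

	ret = gpio_request(211, "demo-led");
	if (ret)
		return ret;

	/* drive the pin as an output, initially high (LED on) */
	gpio_direction_output(211, 1);

	/* ... later, turn it off and release the pin ... */
	gpio_set_value(211, 0);
	gpio_free(211);
	return 0;
}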
*/ -#include <asm/cacheflush.h> +#include <linux/gpio.h> +#include <asm/cacheflush.h> #include <asm/mach-au1x00/au1000.h> void au1000_restart(char *command) @@ -161,7 +162,7 @@ void au1000_halt(void) #else printk(KERN_NOTICE "\n** You can safely turn off the power\n"); #ifdef CONFIG_MIPS_MIRAGE - au_writel((1 << 26) | (1 << 10), GPIO2_OUTPUT); + gpio_direction_output(210, 1); #endif #ifdef CONFIG_MIPS_DB1200 au_writew(au_readw(0xB980001C) | (1 << 14), 0xB980001C); diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c index a75ffbf99f2..de30d8ea717 100644 --- a/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c @@ -27,6 +27,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/gpio.h> #include <linux/init.h> #include <asm/mach-au1x00/au1000.h> @@ -94,12 +95,12 @@ void __init board_setup(void) #endif bcsr->pcmcia = 0x0000; /* turn off PCMCIA power */ -#ifdef CONFIG_MIPS_MIRAGE /* Enable GPIO[31:0] inputs */ - au_writel(0, SYS_PININPUTEN); + alchemy_gpio1_input_enable(); - /* GPIO[20] is output, tristate the other input primary GPIOs */ - au_writel(~(1 << 20), SYS_TRIOUTCLR); +#ifdef CONFIG_MIPS_MIRAGE + /* GPIO[20] is output */ + alchemy_gpio_direction_output(20, 0); /* Set GPIO[210:208] instead of SSI_0 */ pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; @@ -118,8 +119,7 @@ void __init board_setup(void) * Enable speaker amplifier. This should * be part of the audio driver. */ - au_writel(au_readl(GPIO2_DIR) | 0x200, GPIO2_DIR); - au_writel(0x02000200, GPIO2_OUTPUT); + alchemy_gpio_direction_output(209, 1); #endif au_sync(); diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c index aed2fdecc70..cd273545e81 100644 --- a/arch/mips/alchemy/devboards/pb1000/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c @@ -24,6 +24,7 @@ */ #include <linux/delay.h> +#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/mach-au1x00/au1000.h> @@ -130,8 +131,11 @@ void __init board_setup(void) pin_func |= SYS_PF_USB; au_writel(pin_func, SYS_PINFUNC); - au_writel(0x2800, SYS_TRIOUTCLR); - au_writel(0x0030, SYS_OUTPUTCLR); + + alchemy_gpio_direction_input(11); + alchemy_gpio_direction_input(13); + alchemy_gpio_direction_output(4, 0); + alchemy_gpio_direction_output(5, 0); #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ /* Make GPIO 15 an input (for interrupt line) */ @@ -140,7 +144,7 @@ void __init board_setup(void) pin_func |= SYS_PF_I2S; au_writel(pin_func, SYS_PINFUNC); - au_writel(0x8000, SYS_TRIOUTCLR); + alchemy_gpio_direction_input(15); static_cfg0 = au_readl(MEM_STCFG0) & ~0xc00; au_writel(static_cfg0, MEM_STCFG0); diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c index 4df57fae15d..61263081ef5 100644 --- a/arch/mips/alchemy/devboards/pb1100/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c @@ -23,6 +23,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/gpio.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -88,7 +89,7 @@ void __init board_setup(void) /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); - au_writel(0, SYS_PININPUTEN); + alchemy_gpio1_input_enable(); udelay(100); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c index fed3b093156..d7a56569e7e 100644 --- a/arch/mips/alchemy/devboards/pb1500/board_setup.c +++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c @@ -23,8 +23,9 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/init.h> #include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/init.h> #include <linux/interrupt.h> #include <asm/mach-au1x00/au1000.h> @@ -90,11 +91,12 @@ void __init board_setup(void) au_writel(0, SYS_PINSTATERD); udelay(100); -#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) - /* GPIO201 is input for PCMCIA card detect */ /* GPIO203 is input for PCMCIA interrupt request */ - au_writel(au_readl(GPIO2_DIR) & ~((1 << 1) | (1 << 3)), GPIO2_DIR); + alchemy_gpio_direction_input(201); + alchemy_gpio_direction_input(203); + +#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* Zero and disable FREQ2 */ sys_freqctrl = au_readl(SYS_FREQCTRL0); diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c index d5eb9c325ed..632f9862a0f 100644 --- a/arch/mips/alchemy/devboards/pm.c +++ b/arch/mips/alchemy/devboards/pm.c @@ -9,6 +9,7 @@ #include <linux/suspend.h> #include <linux/sysfs.h> #include <asm/mach-au1x00/au1000.h> +#include <asm/mach-au1x00/gpio.h> /* * Generic suspend userspace interface for Alchemy development boards. @@ -26,7 +27,7 @@ static unsigned long db1x_pm_last_wakesrc; static int db1x_pm_enter(suspend_state_t state) { /* enable GPIO based wakeup */ - au_writel(1, SYS_PININPUTEN); + alchemy_gpio1_input_enable(); /* clear and setup wake cause and source */ au_writel(0, SYS_WAKEMSK); diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 8ed1ae12bc5..cc32c69a74a 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c @@ -28,6 +28,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#include <linux/gpio.h> #include <linux/init.h> #include <asm/mach-au1x00/au1000.h> @@ -55,10 +56,11 @@ void __init board_setup(void) } #endif + alchemy_gpio2_enable(); + #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* Enable USB power switch */ - au_writel(au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR); - au_writel(0x100000, GPIO2_OUTPUT); + alchemy_gpio_direction_output(204, 0); #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ #ifdef CONFIG_PCI @@ -74,14 +76,14 @@ void __init board_setup(void) /* Initialize GPIO */ au_writel(0xFFFFFFFF, SYS_TRIOUTCLR); - au_writel(0x00000001, SYS_OUTPUTCLR); /* set M66EN (PCI 66MHz) to OFF */ - au_writel(0x00000008, SYS_OUTPUTSET); /* set PCI CLKRUN# to OFF */ - au_writel(0x00000002, SYS_OUTPUTSET); /* set EXT_IO3 ON */ - au_writel(0x00000020, SYS_OUTPUTCLR); /* set eth PHY TX_ER to OFF */ + alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */ + alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */ + alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */ + alchemy_gpio_direction_output(5, 0); /* Disable eth PHY TX_ER */ /* Enable LED and set it to green */ - au_writel(au_readl(GPIO2_DIR) | 0x1800, GPIO2_DIR); - au_writel(0x18000800, GPIO2_OUTPUT); + alchemy_gpio_direction_output(211, 1); /* green on */ + alchemy_gpio_direction_output(212, 0); /* red off */ board_pci_idsel = mtx1_pci_idsel; @@ -101,10 +103,10 @@ mtx1_pci_idsel(unsigned int devsel, int assert) if (assert && devsel != 0) /* Suppress signal to Cardbus */ - au_writel(0x00000002, SYS_OUTPUTCLR); /* set EXT_IO3 OFF */ + gpio_set_value(1, 0); /* set EXT_IO3 OFF */ else - au_writel(0x00000002, SYS_OUTPUTSET); /* set EXT_IO3 ON */ + gpio_set_value(1, 1); /* set EXT_IO3 ON */ + au_sync_udelay(1); return 1; } - diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index a2634fabc50..4de2d48caed 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c @@ -23,6 +23,7 @@ * 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/gpio.h> #include <linux/init.h> #include <linux/delay.h> @@ -50,6 +51,9 @@ void __init board_setup(void) } #endif + alchemy_gpio1_input_enable(); + alchemy_gpio2_enable(); + /* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */ pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3; pin_func |= SYS_PF_UR3; @@ -65,20 +69,19 @@ void __init board_setup(void) au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? 
*/ #ifdef CONFIG_PCMCIA_XXS1500 - /* Setup PCMCIA signals */ - au_writel(0, SYS_PININPUTEN); - /* GPIO 0, 1, and 4 are inputs */ - au_writel(1 | (1 << 1) | (1 << 4), SYS_TRIOUTCLR); + alchemy_gpio_direction_input(0); + alchemy_gpio_direction_input(1); + alchemy_gpio_direction_input(4); - /* Enable GPIO2 if not already enabled */ - au_writel(1, GPIO2_ENABLE); /* GPIO2 208/9/10/11 are inputs */ - au_writel((1 << 8) | (1 << 9) | (1 << 10) | (1 << 11), GPIO2_DIR); + alchemy_gpio_direction_input(208); + alchemy_gpio_direction_input(209); + alchemy_gpio_direction_input(210); + alchemy_gpio_direction_input(211); /* Turn off power */ - au_writel((au_readl(GPIO2_PINSTATE) & ~(1 << 14)) | (1 << 30), - GPIO2_OUTPUT); + alchemy_gpio_direction_output(214, 0); #endif #ifdef CONFIG_PCI diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index d6903c3f3d5..7c0528b0e34 100644 --- a/arch/mips/cavium-octeon/Makefile +++ b/arch/mips/cavium-octeon/Makefile @@ -14,5 +14,9 @@ obj-y += dma-octeon.o flash_setup.o obj-y += octeon-memcpy.o obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_PCI) += pci-common.o +obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_PCI) += pcie.o +obj-$(CONFIG_PCI_MSI) += msi.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 01b1ef94b36..627c162a615 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -13,20 +13,327 @@ */ #include <linux/types.h> #include <linux/mm.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> + +#include <linux/cache.h> +#include <linux/io.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pci-defs.h> #include <dma-coherence.h> +#ifdef CONFIG_PCI +#include "pci-common.h" +#endif + +#define BAR2_PCI_ADDRESS 0x8000000000ul + +struct bar1_index_state { + int16_t ref_count; /* Number of PCI mappings using this index */ + uint16_t address_bits; /* Upper bits of physical address. This is + shifted 22 bits */ +}; + +#ifdef CONFIG_PCI +static DEFINE_SPINLOCK(bar1_lock); +static struct bar1_index_state bar1_state[32]; +#endif + dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size) { +#ifndef CONFIG_PCI /* Without PCI/PCIe this function can be called for Octeon internal devices such as USB. These devices all support 64bit addressing */ mb(); return virt_to_phys(ptr); +#else + unsigned long flags; + uint64_t dma_mask; + int64_t start_index; + dma_addr_t result = -1; + uint64_t physical = virt_to_phys(ptr); + int64_t index; + + mb(); + /* + * Use the DMA masks to determine the allowed memory + * region. For us it doesn't limit the actual memory, just the + * address visible over PCI. Devices with limits need to use + * lower indexed Bar1 entries. + */ + if (dev) { + dma_mask = dev->coherent_dma_mask; + if (dev->dma_mask) + dma_mask = *dev->dma_mask; + } else { + dma_mask = 0xfffffffful; + } + + /* + * Platform devices, such as the internal USB, skip all + * translation and use Octeon physical addresses directly. + */ + if (!dev || dev->bus == &platform_bus_type) + return physical; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_PCIE: + if (unlikely(physical < (16ul << 10))) + panic("dma_map_single: Not allowed to map first 16KB." 
+ " It interferes with BAR0 special area\n"); + else if ((physical + size >= (256ul << 20)) && + (physical < (512ul << 20))) + panic("dma_map_single: Not allowed to map bootbus\n"); + else if ((physical + size >= 0x400000000ull) && + physical < 0x410000000ull) + panic("dma_map_single: " + "Attempt to map illegal memory address 0x%llx\n", + physical); + else if (physical >= 0x420000000ull) + panic("dma_map_single: " + "Attempt to map illegal memory address 0x%llx\n", + physical); + else if ((physical + size >= + (4ull<<30) - (OCTEON_PCI_BAR1_HOLE_SIZE<<20)) + && physical < (4ull<<30)) + pr_warning("dma_map_single: Warning: " + "Mapping memory address that might " + "conflict with devices 0x%llx-0x%llx\n", + physical, physical+size-1); + /* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */ + if ((physical >= 0x410000000ull) && physical < 0x420000000ull) + result = physical - 0x400000000ull; + else + result = physical; + if (((result+size-1) & dma_mask) != result+size-1) + panic("dma_map_single: Attempt to map address " + "0x%llx-0x%llx, which can't be accessed " + "according to the dma mask 0x%llx\n", + physical, physical+size-1, dma_mask); + goto done; + + case OCTEON_DMA_BAR_TYPE_BIG: +#ifdef CONFIG_64BIT + /* If the device supports 64bit addressing, then use BAR2 */ + if (dma_mask > BAR2_PCI_ADDRESS) { + result = physical + BAR2_PCI_ADDRESS; + goto done; + } +#endif + if (unlikely(physical < (4ul << 10))) { + panic("dma_map_single: Not allowed to map first 4KB. " + "It interferes with BAR0 special area\n"); + } else if (physical < (256ul << 20)) { + if (unlikely(physical + size > (256ul << 20))) + panic("dma_map_single: Requested memory spans " + "Bar0 0:256MB and bootbus\n"); + result = physical; + goto done; + } else if (unlikely(physical < (512ul << 20))) { + panic("dma_map_single: Not allowed to map bootbus\n"); + } else if (physical < (2ul << 30)) { + if (unlikely(physical + size > (2ul << 30))) + panic("dma_map_single: Requested memory spans " + "Bar0 512MB:2GB and BAR1\n"); + result = physical; + goto done; + } else if (physical < (2ul << 30) + (128 << 20)) { + /* Fall through */ + } else if (physical < + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) { + if (unlikely + (physical + size > + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))) + panic("dma_map_single: Requested memory " + "extends past Bar1 (4GB-%luMB)\n", + OCTEON_PCI_BAR1_HOLE_SIZE); + result = physical; + goto done; + } else if ((physical >= 0x410000000ull) && + (physical < 0x420000000ull)) { + if (unlikely(physical + size > 0x420000000ull)) + panic("dma_map_single: Requested memory spans " + "non existant memory\n"); + /* BAR0 fixed mapping 256MB:512MB -> + * 16GB+256MB:16GB+512MB */ + result = physical - 0x400000000ull; + goto done; + } else { + /* Continued below switch statement */ + } + break; + + case OCTEON_DMA_BAR_TYPE_SMALL: +#ifdef CONFIG_64BIT + /* If the device supports 64bit addressing, then use BAR2 */ + if (dma_mask > BAR2_PCI_ADDRESS) { + result = physical + BAR2_PCI_ADDRESS; + goto done; + } +#endif + /* Continued below switch statement */ + break; + + default: + panic("dma_map_single: Invalid octeon_dma_bar_type\n"); + } + + /* Don't allow mapping to span multiple Bar entries. 
The hardware guys + won't guarantee that DMA across boards work */ + if (unlikely((physical >> 22) != ((physical + size - 1) >> 22))) + panic("dma_map_single: " + "Requested memory spans more than one Bar1 entry\n"); + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) + start_index = 31; + else if (unlikely(dma_mask < (1ul << 27))) + start_index = (dma_mask >> 22); + else + start_index = 31; + + /* Only one processor can access the Bar register at once */ + spin_lock_irqsave(&bar1_lock, flags); + + /* Look through Bar1 for existing mapping that will work */ + for (index = start_index; index >= 0; index--) { + if ((bar1_state[index].address_bits == physical >> 22) && + (bar1_state[index].ref_count)) { + /* An existing mapping will work, use it */ + bar1_state[index].ref_count++; + if (unlikely(bar1_state[index].ref_count < 0)) + panic("dma_map_single: " + "Bar1[%d] reference count overflowed\n", + (int) index); + result = (index << 22) | (physical & ((1 << 22) - 1)); + /* Large BAR1 is offset at 2GB */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) + result += 2ul << 30; + goto done_unlock; + } + } + + /* No existing mappings, look for a free entry */ + for (index = start_index; index >= 0; index--) { + if (unlikely(bar1_state[index].ref_count == 0)) { + union cvmx_pci_bar1_indexx bar1_index; + /* We have a free entry, use it */ + bar1_state[index].ref_count = 1; + bar1_state[index].address_bits = physical >> 22; + bar1_index.u32 = 0; + /* Address bits[35:22] sent to L2C */ + bar1_index.s.addr_idx = physical >> 22; + /* Don't put PCI accesses in L2. */ + bar1_index.s.ca = 1; + /* Endian Swap Mode */ + bar1_index.s.end_swp = 1; + /* Set '1' when the selected address range is valid. */ + bar1_index.s.addr_v = 1; + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), + bar1_index.u32); + /* An existing mapping will work, use it */ + result = (index << 22) | (physical & ((1 << 22) - 1)); + /* Large BAR1 is offset at 2GB */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) + result += 2ul << 30; + goto done_unlock; + } + } + + pr_err("dma_map_single: " + "Can't find empty BAR1 index for physical mapping 0x%llx\n", + (unsigned long long) physical); + +done_unlock: + spin_unlock_irqrestore(&bar1_lock, flags); +done: + pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result); + return result; +#endif } void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) { - /* Without PCI/PCIe this function can be called for Octeon internal - * devices such as USB. These devices all support 64bit addressing */ +#ifndef CONFIG_PCI + /* + * Without PCI/PCIe this function can be called for Octeon internal + * devices such as USB. These devices all support 64bit addressing. + */ + return; +#else + unsigned long flags; + uint64_t index; + + /* + * Platform devices, such as the internal USB, skip all + * translation and use Octeon physical addresses directly. 
+ */ + if (dev->bus == &platform_bus_type) + return; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_PCIE: + /* Nothing to do, all mappings are static */ + goto done; + + case OCTEON_DMA_BAR_TYPE_BIG: +#ifdef CONFIG_64BIT + /* Nothing to do for addresses using BAR2 */ + if (dma_addr >= BAR2_PCI_ADDRESS) + goto done; +#endif + if (unlikely(dma_addr < (4ul << 10))) + panic("dma_unmap_single: Unexpect DMA address 0x%llx\n", + dma_addr); + else if (dma_addr < (2ul << 30)) + /* Nothing to do for addresses using BAR0 */ + goto done; + else if (dma_addr < (2ul << 30) + (128ul << 20)) + /* Need to unmap, fall through */ + index = (dma_addr - (2ul << 30)) >> 22; + else if (dma_addr < + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) + goto done; /* Nothing to do for the rest of BAR1 */ + else + panic("dma_unmap_single: Unexpect DMA address 0x%llx\n", + dma_addr); + /* Continued below switch statement */ + break; + + case OCTEON_DMA_BAR_TYPE_SMALL: +#ifdef CONFIG_64BIT + /* Nothing to do for addresses using BAR2 */ + if (dma_addr >= BAR2_PCI_ADDRESS) + goto done; +#endif + index = dma_addr >> 22; + /* Continued below switch statement */ + break; + + default: + panic("dma_unmap_single: Invalid octeon_dma_bar_type\n"); + } + + if (unlikely(index > 31)) + panic("dma_unmap_single: " + "Attempt to unmap an invalid address (0x%llx)\n", + dma_addr); + + spin_lock_irqsave(&bar1_lock, flags); + bar1_state[index].ref_count--; + if (bar1_state[index].ref_count == 0) + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0); + else if (unlikely(bar1_state[index].ref_count < 0)) + panic("dma_unmap_single: Bar1[%u] reference count < 0\n", + (int) index); + spin_unlock_irqrestore(&bar1_lock, flags); +done: + pr_debug("dma_unmap_single 0x%llx\n", dma_addr); return; +#endif } diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile index 80d6cb26766..2fd66db6939 100644 --- a/arch/mips/cavium-octeon/executive/Makefile +++ b/arch/mips/cavium-octeon/executive/Makefile @@ -11,3 +11,4 @@ obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o +obj-$(CONFIG_PCI) += cvmx-helper-errata.o cvmx-helper-jtag.o diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c index 4f5a08b37cc..25666da17b2 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c +++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c @@ -31,6 +31,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> @@ -97,6 +98,33 @@ void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment) return cvmx_bootmem_alloc_range(size, alignment, 0, 0); } +void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name) +{ + int64_t addr; + + addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, + align, name, 0); + if (addr >= 0) + return cvmx_phys_to_ptr(addr); + else + return NULL; +} + +void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, + char *name) +{ + return cvmx_bootmem_alloc_named_range(size, address, address + size, + 0, name); +} + +void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name) +{ + return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name); +} +EXPORT_SYMBOL(cvmx_bootmem_alloc_named); + int cvmx_bootmem_free_named(char *name) { return cvmx_bootmem_phy_named_block_free(name, 0); @@ -106,6 +134,7 @@ struct cvmx_bootmem_named_block_desc 
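Editor's note on the Octeon DMA code above: each BAR1 index covers a 4 MB (1 << 22 byte) window, the PCI address re-attaches the low 22 bits of the physical address, and the "big bar" layout offsets the whole BAR1 region by 2 GB. A standalone sketch of just that arithmetic, with demo_* names invented for illustration.

#include <stdint.h>

#define DEMO_BAR1_WINDOW_SHIFT 22	/* 4 MB windows */

static uint64_t demo_bar1_pci_addr(int index, uint64_t physical, int big_bar)
{
	uint64_t pci = ((uint64_t)index << DEMO_BAR1_WINDOW_SHIFT) |
		       (physical & ((1ull << DEMO_BAR1_WINDOW_SHIFT) - 1));

	if (big_bar)
		pci += 2ull << 30;	/* large BAR1 region starts at 2 GB */
	return pci;
}

static int demo_bar1_index_from_pci(uint64_t dma_addr, int big_bar)
{
	if (big_bar)
		dma_addr -= 2ull << 30;
	return (int)(dma_addr >> DEMO_BAR1_WINDOW_SHIFT);
}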
*cvmx_bootmem_find_named_block(char *name) { return cvmx_bootmem_phy_named_block_find(name, 0); } +EXPORT_SYMBOL(cvmx_bootmem_find_named_block); void cvmx_bootmem_lock(void) { @@ -584,3 +613,78 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags) cvmx_bootmem_unlock(); return named_block_ptr != NULL; /* 0 on failure, 1 on success */ } + +int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, + uint64_t max_addr, + uint64_t alignment, + char *name, + uint32_t flags) +{ + int64_t addr_allocated; + struct cvmx_bootmem_named_block_desc *named_block_desc_ptr; + +#ifdef DEBUG + cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: " + "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n", + (unsigned long long)size, + (unsigned long long)min_addr, + (unsigned long long)max_addr, + (unsigned long long)alignment, + name); +#endif + if (cvmx_bootmem_desc->major_version != 3) { + cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: " + "%d.%d at addr: %p\n", + (int)cvmx_bootmem_desc->major_version, + (int)cvmx_bootmem_desc->minor_version, + cvmx_bootmem_desc); + return -1; + } + + /* + * Take lock here, as name lookup/block alloc/name add need to + * be atomic. + */ + if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) + cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); + + /* Get pointer to first available named block descriptor */ + named_block_desc_ptr = + cvmx_bootmem_phy_named_block_find(NULL, + flags | CVMX_BOOTMEM_FLAG_NO_LOCKING); + + /* + * Check to see if name already in use, return error if name + * not available or no more room for blocks. + */ + if (cvmx_bootmem_phy_named_block_find(name, + flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) { + if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) + cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); + return -1; + } + + + /* + * Round size up to mult of minimum alignment bytes We need + * the actual size allocated to allow for blocks to be + * coallesced when they are freed. The alloc routine does the + * same rounding up on all allocations. + */ + size = __ALIGN_MASK(size, (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)); + + addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, + alignment, + flags | CVMX_BOOTMEM_FLAG_NO_LOCKING); + if (addr_allocated >= 0) { + named_block_desc_ptr->base_addr = addr_allocated; + named_block_desc_ptr->size = size; + strncpy(named_block_desc_ptr->name, name, + cvmx_bootmem_desc->named_block_name_len); + named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0; + } + + if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) + cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); + return addr_allocated; +} diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c new file mode 100644 index 00000000000..868659e64d4 --- /dev/null +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-errata.c @@ -0,0 +1,73 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
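Editor's note on the named-block bootmem helpers added above: a caller allocates a block under a name that other code (or another core) can look up later. A hedged usage sketch; the "demo_shared_buf" name and 1 MB size are illustrative, and the cvmx-bootmem.h header path is assumed from the include style visible in the patch.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>

static void *demo_get_shared_buffer(void)
{
	void *buf;

	/* 1 MB, 128-byte aligned, registered under a lookup name. */
	buf = cvmx_bootmem_alloc_named(1 << 20, 128, "demo_shared_buf");
	if (!buf) {
		/* Someone may have created it already; try to find it. */
		struct cvmx_bootmem_named_block_desc *desc =
			cvmx_bootmem_find_named_block("demo_shared_buf");
		if (desc)
			buf = cvmx_phys_to_ptr(desc->base_addr);
	}
	return buf;
}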
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * Fixes and workaround for Octeon chip errata. This file + * contains functions called by cvmx-helper to workaround known + * chip errata. For the most part, code doesn't need to call + * these functions directly. + * + */ +#include <linux/module.h> + +#include <asm/octeon/octeon.h> + +#include <asm/octeon/cvmx-helper-jtag.h> + +/** + * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass + * 1 doesn't work properly. The following code disables 2nd order + * CDR for the specified QLM. + * + * @qlm: QLM to disable 2nd order CDR for. + */ +void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm) +{ + int lane; + cvmx_helper_qlm_jtag_init(); + /* We need to load all four lanes of the QLM, a total of 1072 bits */ + for (lane = 0; lane < 4; lane++) { + /* + * Each lane has 268 bits. We need to set + * cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> = + * 1. All other bits are zero. Bits go in LSB first, + * so start off with the zeros for bits <63:0>. + */ + cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1); + /* cfg_cdr_incx<67:64>=3 */ + cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3); + /* Zeros for bits <76:68> */ + cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1); + /* cfg_cdr_secord<77>=1 */ + cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1); + /* Zeros for bits <267:78> */ + cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1); + } + cvmx_helper_qlm_jtag_update(qlm); +} +EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr); diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c new file mode 100644 index 00000000000..c1c54890bae --- /dev/null +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c @@ -0,0 +1,144 @@ + +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * + * Helper utilities for qlm_jtag. + * + */ + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-helper-jtag.h> + + +/** + * Initialize the internal QLM JTAG logic to allow programming + * of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions. + * These functions should only be used at the direction of Cavium + * Networks. Programming incorrect values into the JTAG chain + * can cause chip damage. + */ +void cvmx_helper_qlm_jtag_init(void) +{ + union cvmx_ciu_qlm_jtgc jtgc; + uint32_t clock_div = 0; + uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000); + divisor = (divisor - 1) >> 2; + /* Convert the divisor into a power of 2 shift */ + while (divisor) { + clock_div++; + divisor = divisor >> 1; + } + + /* + * Clock divider for QLM JTAG operations. eclk is divided by + * 2^(CLK_DIV + 2) + */ + jtgc.u64 = 0; + jtgc.s.clk_div = clock_div; + jtgc.s.mux_sel = 0; + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) + jtgc.s.bypass = 0x3; + else + jtgc.s.bypass = 0xf; + cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64); + cvmx_read_csr(CVMX_CIU_QLM_JTGC); +} + +/** + * Write up to 32bits into the QLM jtag chain. Bits are shifted + * into the MSB and out the LSB, so you should shift in the low + * order bits followed by the high order bits. The JTAG chain is + * 4 * 268 bits long, or 1072. + * + * @qlm: QLM to shift value into + * @bits: Number of bits to shift in (1-32). + * @data: Data to shift in. Bit 0 enters the chain first, followed by + * bit 1, etc. + * + * Returns The low order bits of the JTAG chain that shifted out of the + * circle. + */ +uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data) +{ + union cvmx_ciu_qlm_jtgd jtgd; + jtgd.u64 = 0; + jtgd.s.shift = 1; + jtgd.s.shft_cnt = bits - 1; + jtgd.s.shft_reg = data; + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) + jtgd.s.select = 1 << qlm; + cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); + do { + jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); + } while (jtgd.s.shift); + return jtgd.s.shft_reg >> (32 - bits); +} + +/** + * Shift long sequences of zeros into the QLM JTAG chain. It is + * common to need to shift more than 32 bits of zeros into the + * chain. This function is a convience wrapper around + * cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of + * zeros at a time. + * + * @qlm: QLM to shift zeros into + * @bits: + */ +void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits) +{ + while (bits > 0) { + int n = bits; + if (n > 32) + n = 32; + cvmx_helper_qlm_jtag_shift(qlm, n, 0); + bits -= n; + } +} + +/** + * Program the QLM JTAG chain into all lanes of the QLM. You must + * have already shifted in 268*4, or 1072 bits into the JTAG + * chain. Updating invalid values can possibly cause chip damage. 
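Editor's note: the errata workaround above shifts five fields per lane, and the helper documentation states the JTAG chain is 4 * 268 = 1072 bits. A small arithmetic check that the per-lane shift counts add up; the numbers are taken directly from the workaround code, only the demo_* wrapper is invented.

#include <linux/bug.h>

static void demo_check_qlm_chain_length(void)
{
	const int per_lane = (63 - 0 + 1) +	/* zeros for bits <63:0>   */
			     (67 - 64 + 1) +	/* cfg_cdr_incx<67:64>     */
			     (76 - 68 + 1) +	/* zeros for bits <76:68>  */
			     (77 - 77 + 1) +	/* cfg_cdr_secord<77>      */
			     (267 - 78 + 1);	/* zeros for bits <267:78> */

	WARN_ON(per_lane != 268);		/* one lane of the chain     */
	WARN_ON(4 * per_lane != 1072);		/* whole 4-lane JTAG chain   */
}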
+ * + * @qlm: QLM to program + */ +void cvmx_helper_qlm_jtag_update(int qlm) +{ + union cvmx_ciu_qlm_jtgd jtgd; + + /* Update the new data */ + jtgd.u64 = 0; + jtgd.s.update = 1; + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) + jtgd.s.select = 1 << qlm; + cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64); + do { + jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD); + } while (jtgd.s.update); +} diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c index 4812370706a..e5838890cba 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c +++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c @@ -29,6 +29,7 @@ * This module provides system/board/application information obtained * by the bootloader. */ +#include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> @@ -69,6 +70,7 @@ struct cvmx_sysinfo *cvmx_sysinfo_get(void) { return &(state.sysinfo); } +EXPORT_SYMBOL(cvmx_sysinfo_get); /** * This function is used in non-simple executive environments (such as diff --git a/arch/mips/cavium-octeon/msi.c b/arch/mips/cavium-octeon/msi.c new file mode 100644 index 00000000000..964b03b75a8 --- /dev/null +++ b/arch/mips/cavium-octeon/msi.c @@ -0,0 +1,288 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2007 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/msi.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pci-defs.h> +#include <asm/octeon/cvmx-npei-defs.h> +#include <asm/octeon/cvmx-pexp-defs.h> + +#include "pci-common.h" + +/* + * Each bit in msi_free_irq_bitmask represents a MSI interrupt that is + * in use. + */ +static uint64_t msi_free_irq_bitmask; + +/* + * Each bit in msi_multiple_irq_bitmask tells that the device using + * this bit in msi_free_irq_bitmask is also using the next bit. This + * is used so we can disable all of the MSI interrupts when a device + * uses multiple. + */ +static uint64_t msi_multiple_irq_bitmask; + +/* + * This lock controls updates to msi_free_irq_bitmask and + * msi_multiple_irq_bitmask. + */ +static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock); + + +/** + * Called when a driver request MSI interrupts instead of the + * legacy INT A-D. This routine will allocate multiple interrupts + * for MSI devices that support them. A device can override this by + * programming the MSI control bits [6:4] before calling + * pci_enable_msi(). + * + * @param dev Device requesting MSI interrupts + * @param desc MSI descriptor + * + * Returns 0 on success. + */ +int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + struct msi_msg msg; + uint16_t control; + int configured_private_bits; + int request_private_bits; + int irq; + int irq_step; + uint64_t search_mask; + + /* + * Read the MSI config to figure out how many IRQs this device + * wants. Most devices only want 1, which will give + * configured_private_bits and request_private_bits equal 0. + */ + pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, + &control); + + /* + * If the number of private bits has been configured then use + * that value instead of the requested number. This gives the + * driver the chance to override the number of interrupts + * before calling pci_enable_msi(). 
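The MSI Message Control word read above is decoded a few lines further down using two log2-encoded fields: QMASK (bits 3:1) gives how many vectors the function supports, QSIZE (bits 6:4) how many are currently enabled. A standalone sketch of the decode; the 0x008a register value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PCI_MSI_FLAGS_QMASK     0x0e    /* Multiple Message Capable */
#define PCI_MSI_FLAGS_QSIZE     0x70    /* Multiple Message Enable */

int main(void)
{
        uint16_t control = 0x008a;      /* hypothetical Message Control value */
        int capable = (control & PCI_MSI_FLAGS_QMASK) >> 1;
        int enabled = (control & PCI_MSI_FLAGS_QSIZE) >> 4;

        printf("device supports %d MSI vectors, %d currently enabled\n",
               1 << capable, 1 << enabled);
        return 0;
}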
+ */ + configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4; + if (configured_private_bits == 0) { + /* Nothing is configured, so use the hardware requested size */ + request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1; + } else { + /* + * Use the number of configured bits, assuming the + * driver wanted to override the hardware request + * value. + */ + request_private_bits = configured_private_bits; + } + + /* + * The PCI 2.3 spec mandates that there are at most 32 + * interrupts. If this device asks for more, only give it one. + */ + if (request_private_bits > 5) + request_private_bits = 0; + +try_only_one: + /* + * The IRQs have to be aligned on a power of two based on the + * number being requested. + */ + irq_step = 1 << request_private_bits; + + /* Mask with one bit for each IRQ */ + search_mask = (1 << irq_step) - 1; + + /* + * We're going to search msi_free_irq_bitmask_lock for zero + * bits. This represents an MSI interrupt number that isn't in + * use. + */ + spin_lock(&msi_free_irq_bitmask_lock); + for (irq = 0; irq < 64; irq += irq_step) { + if ((msi_free_irq_bitmask & (search_mask << irq)) == 0) { + msi_free_irq_bitmask |= search_mask << irq; + msi_multiple_irq_bitmask |= (search_mask >> 1) << irq; + break; + } + } + spin_unlock(&msi_free_irq_bitmask_lock); + + /* Make sure the search for available interrupts didn't fail */ + if (irq >= 64) { + if (request_private_bits) { + pr_err("arch_setup_msi_irq: Unable to find %d free " + "interrupts, trying just one", + 1 << request_private_bits); + request_private_bits = 0; + goto try_only_one; + } else + panic("arch_setup_msi_irq: Unable to find a free MSI " + "interrupt"); + } + + /* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */ + irq += OCTEON_IRQ_MSI_BIT0; + + switch (octeon_dma_bar_type) { + case OCTEON_DMA_BAR_TYPE_SMALL: + /* When not using big bar, Bar 0 is based at 128MB */ + msg.address_lo = + ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; + msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; + case OCTEON_DMA_BAR_TYPE_BIG: + /* When using big bar, Bar 0 is based at 0 */ + msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32; + break; + case OCTEON_DMA_BAR_TYPE_PCIE: + /* When using PCIe, Bar 0 is based at 0 */ + /* FIXME CVMX_NPEI_MSI_RCV* other than 0? */ + msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff; + msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32; + break; + default: + panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type\n"); + } + msg.data = irq - OCTEON_IRQ_MSI_BIT0; + + /* Update the number of IRQs the device has available to it */ + control &= ~PCI_MSI_FLAGS_QSIZE; + control |= request_private_bits << 4; + pci_write_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, + control); + + set_irq_msi(irq, desc); + write_msi_msg(irq, &msg); + return 0; +} + + +/** + * Called when a device no longer needs its MSI interrupts. All + * MSI interrupts for the device are freed. + * + * @irq: The devices first irq number. There may be multple in sequence. + */ +void arch_teardown_msi_irq(unsigned int irq) +{ + int number_irqs; + uint64_t bitmask; + + if ((irq < OCTEON_IRQ_MSI_BIT0) || (irq > OCTEON_IRQ_MSI_BIT63)) + panic("arch_teardown_msi_irq: Attempted to teardown illegal " + "MSI interrupt (%d)", irq); + irq -= OCTEON_IRQ_MSI_BIT0; + + /* + * Count the number of IRQs we need to free by looking at the + * msi_multiple_irq_bitmask. Each bit set means that the next + * IRQ is also owned by this device. 
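The allocation loop above is an aligned first-fit search over a 64-bit bitmap, with a second bitmap recording which vectors are followers of a multi-MSI block. A standalone model of that bookkeeping, with names local to the example:

#include <stdint.h>
#include <stdio.h>

static uint64_t free_mask, multiple_mask;

/* Allocate 2^private_bits vectors, aligned on a 2^private_bits boundary. */
static int alloc_msi_block(int private_bits)
{
        int step = 1 << private_bits;
        uint64_t search = ((uint64_t)1 << step) - 1;    /* 'step' ones */
        int irq;

        for (irq = 0; irq < 64; irq += step) {
                if ((free_mask & (search << irq)) == 0) {
                        free_mask |= search << irq;
                        /* every vector but the first is a "follower" */
                        multiple_mask |= (search >> 1) << irq;
                        return irq;
                }
        }
        return -1;                      /* nothing free */
}

int main(void)
{
        printf("4-vector block starts at bit %d\n", alloc_msi_block(2));
        printf("single vector lands at bit %d\n", alloc_msi_block(0));
        printf("free=%#llx multiple=%#llx\n",
               (unsigned long long)free_mask,
               (unsigned long long)multiple_mask);
        return 0;
}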
+ */ + number_irqs = 0; + while ((irq+number_irqs < 64) && + (msi_multiple_irq_bitmask & (1ull << (irq + number_irqs)))) + number_irqs++; + number_irqs++; + /* Mask with one bit for each IRQ */ + bitmask = (1 << number_irqs) - 1; + /* Shift the mask to the correct bit location */ + bitmask <<= irq; + if ((msi_free_irq_bitmask & bitmask) != bitmask) + panic("arch_teardown_msi_irq: Attempted to teardown MSI " + "interrupt (%d) not in use", irq); + + /* Checks are done, update the in use bitmask */ + spin_lock(&msi_free_irq_bitmask_lock); + msi_free_irq_bitmask &= ~bitmask; + msi_multiple_irq_bitmask &= ~bitmask; + spin_unlock(&msi_free_irq_bitmask_lock); +} + + +/** + * Called by the interrupt handling code when an MSI interrupt + * occurs. + * + * @param cpl + * @param dev_id + * + * @return + */ +static irqreturn_t octeon_msi_interrupt(int cpl, void *dev_id) +{ + uint64_t msi_bits; + int irq; + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) + msi_bits = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_RCV0); + else + msi_bits = cvmx_read_csr(CVMX_NPI_NPI_MSI_RCV); + irq = fls64(msi_bits); + if (irq) { + irq += OCTEON_IRQ_MSI_BIT0 - 1; + if (irq_desc[irq].action) { + do_IRQ(irq); + return IRQ_HANDLED; + } else { + pr_err("Spurious MSI interrupt %d\n", irq); + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { + /* These chips have PCIe */ + cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0, + 1ull << (irq - + OCTEON_IRQ_MSI_BIT0)); + } else { + /* These chips have PCI */ + cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV, + 1ull << (irq - + OCTEON_IRQ_MSI_BIT0)); + } + } + } + return IRQ_NONE; +} + + +/** + * Initializes the MSI interrupt handling code + * + * @return + */ +int octeon_msi_initialize(void) +{ + int r; + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { + r = request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[0:63]", octeon_msi_interrupt); + } else if (octeon_is_pci_host()) { + r = request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[0:15]", octeon_msi_interrupt); + r += request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[16:31]", octeon_msi_interrupt); + r += request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[32:47]", octeon_msi_interrupt); + r += request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt, + IRQF_SHARED, + "MSI[48:63]", octeon_msi_interrupt); + } + return 0; +} + +subsys_initcall(octeon_msi_initialize); diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index d3a0c8154be..8dfa009e007 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -10,6 +10,8 @@ #include <linux/hardirq.h> #include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-pexp-defs.h> +#include <asm/octeon/cvmx-npi-defs.h> DEFINE_RWLOCK(octeon_irq_ciu0_rwlock); DEFINE_RWLOCK(octeon_irq_ciu1_rwlock); diff --git a/arch/mips/cavium-octeon/pci-common.c b/arch/mips/cavium-octeon/pci-common.c new file mode 100644 index 00000000000..cd029f88da7 --- /dev/null +++ b/arch/mips/cavium-octeon/pci-common.c @@ -0,0 +1,137 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
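The handler above picks one pending source per invocation using fls64() on the MSI receive register. The sketch below shows the same fls64() arithmetic, but drains every set bit in a loop so the mapping from bit position to IRQ number is visible; MSI_BASE_IRQ and the status value are invented:

#include <stdint.h>
#include <stdio.h>

#define MSI_BASE_IRQ    48              /* hypothetical OCTEON_IRQ_MSI_BIT0 */

static int fls64(uint64_t x)            /* 1-based index of highest set bit */
{
        int n = 0;

        while (x) {
                n++;
                x >>= 1;
        }
        return n;
}

int main(void)
{
        uint64_t msi_bits = (1ull << 17) | (1ull << 5); /* invented status */

        while (msi_bits) {
                int bit = fls64(msi_bits) - 1;

                printf("pending MSI source %d -> irq %d\n",
                       bit, MSI_BASE_IRQ + bit);
                msi_bits &= ~(1ull << bit);             /* ack this source */
        }
        return 0;
}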
+ * + * Copyright (C) 2005-2007 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/delay.h> +#include "pci-common.h" + +typeof(pcibios_map_irq) *octeon_pcibios_map_irq; +enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID; + +/** + * Map a PCI device to the appropriate interrupt line + * + * @param dev The Linux PCI device structure for the device to map + * @param slot The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @param pin The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. + * @return Interrupt number for the device + */ +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + if (octeon_pcibios_map_irq) + return octeon_pcibios_map_irq(dev, slot, pin); + else + panic("octeon_pcibios_map_irq doesn't point to a " + "pcibios_map_irq() function"); +} + + +/** + * Called to perform platform specific PCI setup + * + * @param dev + * @return + */ +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + uint16_t config; + uint32_t dconfig; + int pos; + /* + * Force the Cache line setting to 64 bytes. The standard + * Linux bus scan doesn't seem to set it. Octeon really has + * 128 byte lines, but Intel bridges get really upset if you + * try and set values above 64 bytes. Value is specified in + * 32bit words. + */ + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4); + /* Set latency timers for all devices */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 48); + + /* Enable reporting System errors and parity errors on all devices */ + /* Enable parity checking and error reporting */ + pci_read_config_word(dev, PCI_COMMAND, &config); + config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; + pci_write_config_word(dev, PCI_COMMAND, config); + + if (dev->subordinate) { + /* Set latency timers on sub bridges */ + pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 48); + /* More bridge error detection */ + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config); + config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config); + } + + /* Enable the PCIe normal error reporting */ + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); + if (pos) { + /* Update Device Control */ + pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config); + /* Correctable Error Reporting */ + config |= PCI_EXP_DEVCTL_CERE; + /* Non-Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_NFERE; + /* Fatal Error Reporting */ + config |= PCI_EXP_DEVCTL_FERE; + /* Unsupported Request */ + config |= PCI_EXP_DEVCTL_URRE; + pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config); + } + + /* Find the Advanced Error Reporting capability */ + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (pos) { + /* Clear Uncorrectable Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, + &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, + dconfig); + /* Enable reporting of all uncorrectable errors */ + /* Uncorrectable Error Mask - turned on bits disable errors */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0); + /* + * Leave severity at HW default. This only controls if + * errors are reported as uncorrectable or + * correctable, not if the error is reported. 
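pcibios_map_irq() above is only a trampoline through a function pointer that the PCI or PCIe setup code fills in later. A standalone sketch of that dispatch pattern, with hypothetical names and an arbitrary IRQ base of 16:

#include <stdio.h>
#include <stdlib.h>

typedef int map_irq_fn(int slot, int pin);

static map_irq_fn *backend_map_irq;     /* filled in by bus setup code */

static int pci_map_irq(int slot, int pin)
{
        return ((slot + pin - 3) & 3) + 16;     /* INTA-D above a base of 16 */
}

static int map_irq(int slot, int pin)
{
        if (!backend_map_irq) {
                fprintf(stderr, "no IRQ mapping backend registered\n");
                exit(1);
        }
        return backend_map_irq(slot, pin);
}

int main(void)
{
        backend_map_irq = pci_map_irq;          /* chosen at init time */
        printf("slot 3, pin INTA -> irq %d\n", map_irq(3, 1));
        return 0;
}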
+ */ + /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */ + /* Clear Correctable Error Status */ + pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig); + /* Enable reporting of all correctable errors */ + /* Correctable Error Mask - turned on bits disable errors */ + pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0); + /* Advanced Error Capabilities */ + pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig); + /* ECRC Generation Enable */ + if (config & PCI_ERR_CAP_ECRC_GENC) + config |= PCI_ERR_CAP_ECRC_GENE; + /* ECRC Check Enable */ + if (config & PCI_ERR_CAP_ECRC_CHKC) + config |= PCI_ERR_CAP_ECRC_CHKE; + pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig); + /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */ + /* Report all errors to the root complex */ + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, + PCI_ERR_ROOT_CMD_COR_EN | + PCI_ERR_ROOT_CMD_NONFATAL_EN | + PCI_ERR_ROOT_CMD_FATAL_EN); + /* Clear the Root status register */ + pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig); + pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig); + } + + return 0; +} diff --git a/arch/mips/cavium-octeon/pci-common.h b/arch/mips/cavium-octeon/pci-common.h new file mode 100644 index 00000000000..74ae79991e4 --- /dev/null +++ b/arch/mips/cavium-octeon/pci-common.h @@ -0,0 +1,39 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2007 Cavium Networks + */ +#ifndef __OCTEON_PCI_COMMON_H__ +#define __OCTEON_PCI_COMMON_H__ + +#include <linux/pci.h> + +/* Some PCI cards require delays when accessing config space. */ +#define PCI_CONFIG_SPACE_DELAY 10000 + +/* pcibios_map_irq() is defined inside pci-common.c. All it does is call the + Octeon specific version pointed to by this variable. This function needs to + change for PCI or PCIe based hosts */ +extern typeof(pcibios_map_irq) *octeon_pcibios_map_irq; + +/* The following defines are only used when octeon_dma_bar_type = + OCTEON_DMA_BAR_TYPE_BIG */ +#define OCTEON_PCI_BAR1_HOLE_BITS 5 +#define OCTEON_PCI_BAR1_HOLE_SIZE (1ul<<(OCTEON_PCI_BAR1_HOLE_BITS+3)) + +enum octeon_dma_bar_type { + OCTEON_DMA_BAR_TYPE_INVALID, + OCTEON_DMA_BAR_TYPE_SMALL, + OCTEON_DMA_BAR_TYPE_BIG, + OCTEON_DMA_BAR_TYPE_PCIE +}; + +/** + * This is a variable to tell the DMA mapping system in dma-octeon.c + * how to map PCI DMA addresses. + */ +extern enum octeon_dma_bar_type octeon_dma_bar_type; + +#endif diff --git a/arch/mips/cavium-octeon/pci.c b/arch/mips/cavium-octeon/pci.c new file mode 100644 index 00000000000..67c0ff5e92f --- /dev/null +++ b/arch/mips/cavium-octeon/pci.c @@ -0,0 +1,568 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2005-2007 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/delay.h> + +#include <asm/time.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npi-defs.h> +#include <asm/octeon/cvmx-pci-defs.h> + +#include "pci-common.h" + +#define USE_OCTEON_INTERNAL_ARBITER + +/* + * Octeon's PCI controller uses did=3, subdid=2 for PCI IO + * addresses. 
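The ECRC handling above intends to enable generation and checking only when the capability register advertises them. A standalone sketch of that intent, using local stand-ins for the config-space accessors; the bit positions follow the AER capability layout:

#include <stdio.h>

#define ECRC_GENC       0x00000020      /* generation capable */
#define ECRC_GENE       0x00000040      /* generation enable */
#define ECRC_CHKC       0x00000080      /* check capable */
#define ECRC_CHKE       0x00000100      /* check enable */

static unsigned int aer_cap = ECRC_GENC | ECRC_CHKC;    /* fake device */

static unsigned int read_cap(void)      { return aer_cap; }
static void write_cap(unsigned int v)   { aer_cap = v; }

int main(void)
{
        unsigned int cap = read_cap();

        if (cap & ECRC_GENC)            /* only enable what the device offers */
                cap |= ECRC_GENE;
        if (cap & ECRC_CHKC)
                cap |= ECRC_CHKE;
        write_cap(cap);
        printf("AER capability register now %#x\n", aer_cap);
        return 0;
}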
Use PCI endian swapping 1 so no address swapping is + * necessary. The Linux io routines will endian swap the data. + */ +#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull +#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32) + +/* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */ +#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull) + +/** + * This is the bit decoding used for the Octeon PCI controller addresses + */ +union octeon_pci_address { + uint64_t u64; + struct { + uint64_t upper:2; + uint64_t reserved:13; + uint64_t io:1; + uint64_t did:5; + uint64_t subdid:3; + uint64_t reserved2:4; + uint64_t endian_swap:2; + uint64_t reserved3:10; + uint64_t bus:8; + uint64_t dev:5; + uint64_t func:3; + uint64_t reg:8; + } s; +}; + +/** + * Return the mapping of PCI device number to IRQ line. Each + * character in the return string represents the interrupt + * line for the device at that position. Device 1 maps to the + * first character, etc. The characters A-D are used for PCI + * interrupts. + * + * Returns PCI interrupt mapping + */ +const char *octeon_get_pci_interrupts(void) +{ + /* + * Returning an empty string causes the interrupts to be + * routed based on the PCI specification. From the PCI spec: + * + * INTA# of Device Number 0 is connected to IRQW on the system + * board. (Device Number has no significance regarding being + * located on the system board or in a connector.) INTA# of + * Device Number 1 is connected to IRQX on the system + * board. INTA# of Device Number 2 is connected to IRQY on the + * system board. INTA# of Device Number 3 is connected to IRQZ + * on the system board. The table below describes how each + * agent's INTx# lines are connected to the system board + * interrupt lines. The following equation can be used to + * determine to which INTx# signal on the system board a given + * device's INTx# line(s) is connected. + * + * MB = (D + I) MOD 4 MB = System board Interrupt (IRQW = 0, + * IRQX = 1, IRQY = 2, and IRQZ = 3) D = Device Number I = + * Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and + * INTD# = 3) + */ + switch (octeon_bootinfo->board_type) { + case CVMX_BOARD_TYPE_NAO38: + /* This is really the NAC38 */ + return "AAAAADABAAAAAAAAAAAAAAAAAAAAAAAA"; + case CVMX_BOARD_TYPE_THUNDER: + return ""; + case CVMX_BOARD_TYPE_EBH3000: + return ""; + case CVMX_BOARD_TYPE_EBH3100: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; + case CVMX_BOARD_TYPE_BBGW_REF: + return "AABCD"; + default: + return ""; + } +} + +/** + * Map a PCI device to the appropriate interrupt line + * + * @dev: The Linux PCI device structure for the device to map + * @slot: The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @pin: The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. 
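The bit-field layout of union octeon_pci_address above can be cross-checked by packing the same fields with explicit shifts: did=3/subdid=2 with endian swap reproduces the OCTEON_PCI_IOSPACE_BASE constant. A standalone sketch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t octeon_pci_addr(uint64_t did, uint64_t subdid, uint64_t es,
                                uint64_t bus, uint64_t dev, uint64_t func,
                                uint64_t reg)
{
        return (2ull << 62) |           /* upper: XKPHYS */
               (1ull << 48) |           /* io */
               (did << 43) | (subdid << 40) |
               (es << 34) |             /* endian_swap */
               (bus << 16) | (dev << 11) | (func << 8) | reg;
}

int main(void)
{
        /* did=3/subdid=2 is the PCI IO space encoding used above */
        assert(octeon_pci_addr(3, 2, 1, 0, 0, 0, 0) == 0x80011a0400000000ull);
        /* did=3/subdid=1 plus bus/dev/func/reg is a config space address */
        printf("config address for 00:01.0 reg 0x10: %#llx\n",
               (unsigned long long)octeon_pci_addr(3, 1, 1, 0, 1, 0, 0x10));
        return 0;
}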
+ * Returns Interrupt number for the device + */ +int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev, + u8 slot, u8 pin) +{ + int irq_num; + const char *interrupts; + int dev_num; + + /* Get the board specific interrupt mapping */ + interrupts = octeon_get_pci_interrupts(); + + dev_num = dev->devfn >> 3; + if (dev_num < strlen(interrupts)) + irq_num = ((interrupts[dev_num] - 'A' + pin - 1) & 3) + + OCTEON_IRQ_PCI_INT0; + else + irq_num = ((slot + pin - 3) & 3) + OCTEON_IRQ_PCI_INT0; + return irq_num; +} + + +/** + * Read a value from configuration space + * + */ +static int octeon_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + union octeon_pci_address pci_addr; + + pci_addr.u64 = 0; + pci_addr.s.upper = 2; + pci_addr.s.io = 1; + pci_addr.s.did = 3; + pci_addr.s.subdid = 1; + pci_addr.s.endian_swap = 1; + pci_addr.s.bus = bus->number; + pci_addr.s.dev = devfn >> 3; + pci_addr.s.func = devfn & 0x7; + pci_addr.s.reg = reg; + +#if PCI_CONFIG_SPACE_DELAY + udelay(PCI_CONFIG_SPACE_DELAY); +#endif + switch (size) { + case 4: + *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64)); + return PCIBIOS_SUCCESSFUL; + case 2: + *val = le16_to_cpu(cvmx_read64_uint16(pci_addr.u64)); + return PCIBIOS_SUCCESSFUL; + case 1: + *val = cvmx_read64_uint8(pci_addr.u64); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + + +/** + * Write a value to PCI configuration space + * + * @bus: + * @devfn: + * @reg: + * @size: + * @val: + * Returns + */ +static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + union octeon_pci_address pci_addr; + + pci_addr.u64 = 0; + pci_addr.s.upper = 2; + pci_addr.s.io = 1; + pci_addr.s.did = 3; + pci_addr.s.subdid = 1; + pci_addr.s.endian_swap = 1; + pci_addr.s.bus = bus->number; + pci_addr.s.dev = devfn >> 3; + pci_addr.s.func = devfn & 0x7; + pci_addr.s.reg = reg; + +#if PCI_CONFIG_SPACE_DELAY + udelay(PCI_CONFIG_SPACE_DELAY); +#endif + switch (size) { + case 4: + cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val)); + return PCIBIOS_SUCCESSFUL; + case 2: + cvmx_write64_uint16(pci_addr.u64, cpu_to_le16(val)); + return PCIBIOS_SUCCESSFUL; + case 1: + cvmx_write64_uint8(pci_addr.u64, val); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + + +static struct pci_ops octeon_pci_ops = { + octeon_read_config, + octeon_write_config, +}; + +static struct resource octeon_pci_mem_resource = { + .start = 0, + .end = 0, + .name = "Octeon PCI MEM", + .flags = IORESOURCE_MEM, +}; + +/* + * PCI ports must be above 16KB so the ISA bus filtering in the PCI-X to PCI + * bridge + */ +static struct resource octeon_pci_io_resource = { + .start = 0x4000, + .end = OCTEON_PCI_IOSPACE_SIZE - 1, + .name = "Octeon PCI IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pci_controller = { + .pci_ops = &octeon_pci_ops, + .mem_resource = &octeon_pci_mem_resource, + .mem_offset = OCTEON_PCI_MEMSPACE_OFFSET, + .io_resource = &octeon_pci_io_resource, + .io_offset = 0, + .io_map_base = OCTEON_PCI_IOSPACE_BASE, +}; + + +/** + * Low level initialize the Octeon PCI controller + * + * Returns + */ +static void octeon_pci_initialize(void) +{ + union cvmx_pci_cfg01 cfg01; + union cvmx_npi_ctl_status ctl_status; + union cvmx_pci_ctl_status_2 ctl_status_2; + union cvmx_pci_cfg19 cfg19; + union cvmx_pci_cfg16 cfg16; + union cvmx_pci_cfg22 cfg22; + union cvmx_pci_cfg56 cfg56; + + /* Reset the PCI Bus */ + cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1); + 
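octeon_pci_pcibios_map_irq() above combines the per-board letter table with the pin number, falling back to the (slot + pin) MOD 4 rule from the PCI spec when the table runs out. A standalone sketch of the swizzle; IRQ_PCI_INT0 is a stand-in base and the table is the EBH3100-style string:

#include <stdio.h>
#include <string.h>

#define IRQ_PCI_INT0    40              /* hypothetical base IRQ */

static int map_irq(const char *table, int slot, int pin)
{
        if ((size_t)slot < strlen(table))
                return ((table[slot] - 'A' + pin - 1) & 3) + IRQ_PCI_INT0;
        /* no table entry: the PCI spec (slot + pin) MOD 4 rule */
        return ((slot + pin - 3) & 3) + IRQ_PCI_INT0;
}

int main(void)
{
        const char *table = "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";

        printf("dev 3 INTA -> irq %d\n", map_irq(table, 3, 1));
        printf("dev 3 INTB -> irq %d\n", map_irq(table, 3, 2));
        printf("dev 40 INTA -> irq %d\n", map_irq(table, 40, 1));
        return 0;
}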
cvmx_read_csr(CVMX_CIU_SOFT_PRST); + + udelay(2000); /* Hold PCI reset for 2 ms */ + + ctl_status.u64 = 0; /* cvmx_read_csr(CVMX_NPI_CTL_STATUS); */ + ctl_status.s.max_word = 1; + ctl_status.s.timer = 1; + cvmx_write_csr(CVMX_NPI_CTL_STATUS, ctl_status.u64); + + /* Deassert PCI reset and advertize PCX Host Mode Device Capability + (64b) */ + cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4); + cvmx_read_csr(CVMX_CIU_SOFT_PRST); + + udelay(2000); /* Wait 2 ms after deasserting PCI reset */ + + ctl_status_2.u32 = 0; + ctl_status_2.s.tsr_hwm = 1; /* Initializes to 0. Must be set + before any PCI reads. */ + ctl_status_2.s.bar2pres = 1; /* Enable BAR2 */ + ctl_status_2.s.bar2_enb = 1; + ctl_status_2.s.bar2_cax = 1; /* Don't use L2 */ + ctl_status_2.s.bar2_esx = 1; + ctl_status_2.s.pmo_amod = 1; /* Round robin priority */ + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) { + /* BAR1 hole */ + ctl_status_2.s.bb1_hole = OCTEON_PCI_BAR1_HOLE_BITS; + ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */ + ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */ + ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */ + ctl_status_2.s.bb1 = 1; /* BAR1 is big */ + ctl_status_2.s.bb0 = 1; /* BAR0 is big */ + } + + octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32); + udelay(2000); /* Wait 2 ms before doing PCI reads */ + + ctl_status_2.u32 = octeon_npi_read32(CVMX_NPI_PCI_CTL_STATUS_2); + pr_notice("PCI Status: %s %s-bit\n", + ctl_status_2.s.ap_pcix ? "PCI-X" : "PCI", + ctl_status_2.s.ap_64ad ? "64" : "32"); + + if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) { + union cvmx_pci_cnt_reg cnt_reg_start; + union cvmx_pci_cnt_reg cnt_reg_end; + unsigned long cycles, pci_clock; + + cnt_reg_start.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG); + cycles = read_c0_cvmcount(); + udelay(1000); + cnt_reg_end.u64 = cvmx_read_csr(CVMX_NPI_PCI_CNT_REG); + cycles = read_c0_cvmcount() - cycles; + pci_clock = (cnt_reg_end.s.pcicnt - cnt_reg_start.s.pcicnt) / + (cycles / (mips_hpt_frequency / 1000000)); + pr_notice("PCI Clock: %lu MHz\n", pci_clock); + } + + /* + * TDOMC must be set to one in PCI mode. TDOMC should be set to 4 + * in PCI-X mode to allow four oustanding splits. Otherwise, + * should not change from its reset value. Don't write PCI_CFG19 + * in PCI mode (0x82000001 reset value), write it to 0x82000004 + * after PCI-X mode is known. MRBCI,MDWE,MDRE -> must be zero. + * MRBCM -> must be one. + */ + if (ctl_status_2.s.ap_pcix) { + cfg19.u32 = 0; + /* + * Target Delayed/Split request outstanding maximum + * count. [1..31] and 0=32. NOTE: If the user + * programs these bits beyond the Designed Maximum + * outstanding count, then the designed maximum table + * depth will be used instead. No additional + * Deferred/Split transactions will be accepted if + * this outstanding maximum count is + * reached. Furthermore, no additional deferred/split + * transactions will be accepted if the I/O delay/ I/O + * Split Request outstanding maximum is reached. + */ + cfg19.s.tdomc = 4; + /* + * Master Deferred Read Request Outstanding Max Count + * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC + * cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101 + * 5 2 110 6 3 111 7 3 For example, if these bits are + * programmed to 100, the core can support 2 DAC + * cycles, 4 SAC cycles or a combination of 1 DAC and + * 2 SAC cycles. NOTE: For the PCI-X maximum + * outstanding split transactions, refer to + * CRE0[22:20]. 
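The CN58XX/CN50XX branch above measures the PCI clock by sampling the PCI cycle counter across a roughly 1 ms busy-wait and scaling by the core clock. A standalone sketch of the arithmetic with invented sample values:

#include <stdio.h>

int main(void)
{
        unsigned long hpt_frequency = 500000000;        /* 500 MHz core clock */
        unsigned long core_cycles = 500000;             /* elapsed in ~1 ms */
        unsigned long pci_cycles = 33000;               /* PCI counter delta */
        unsigned long usecs = core_cycles / (hpt_frequency / 1000000);
        unsigned long pci_clock = pci_cycles / usecs;

        printf("PCI Clock: %lu MHz\n", pci_clock);      /* 33 MHz bus */
        return 0;
}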
+ */ + cfg19.s.mdrrmc = 2; + /* + * Master Request (Memory Read) Byte Count/Byte Enable + * select. 0 = Byte Enables valid. In PCI mode, a + * burst transaction cannot be performed using Memory + * Read command=4?h6. 1 = DWORD Byte Count valid + * (default). In PCI Mode, the memory read byte + * enables are automatically generated by the + * core. Note: N3 Master Request transaction sizes are + * always determined through the + * am_attr[<35:32>|<7:0>] field. + */ + cfg19.s.mrbcm = 1; + octeon_npi_write32(CVMX_NPI_PCI_CFG19, cfg19.u32); + } + + + cfg01.u32 = 0; + cfg01.s.msae = 1; /* Memory Space Access Enable */ + cfg01.s.me = 1; /* Master Enable */ + cfg01.s.pee = 1; /* PERR# Enable */ + cfg01.s.see = 1; /* System Error Enable */ + cfg01.s.fbbe = 1; /* Fast Back to Back Transaction Enable */ + + octeon_npi_write32(CVMX_NPI_PCI_CFG01, cfg01.u32); + +#ifdef USE_OCTEON_INTERNAL_ARBITER + /* + * When OCTEON is a PCI host, most systems will use OCTEON's + * internal arbiter, so must enable it before any PCI/PCI-X + * traffic can occur. + */ + { + union cvmx_npi_pci_int_arb_cfg pci_int_arb_cfg; + + pci_int_arb_cfg.u64 = 0; + pci_int_arb_cfg.s.en = 1; /* Internal arbiter enable */ + cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, pci_int_arb_cfg.u64); + } +#endif /* USE_OCTEON_INTERNAL_ARBITER */ + + /* + * Preferrably written to 1 to set MLTD. [RDSATI,TRTAE, + * TWTAE,TMAE,DPPMR -> must be zero. TILT -> must not be set to + * 1..7. + */ + cfg16.u32 = 0; + cfg16.s.mltd = 1; /* Master Latency Timer Disable */ + octeon_npi_write32(CVMX_NPI_PCI_CFG16, cfg16.u32); + + /* + * Should be written to 0x4ff00. MTTV -> must be zero. + * FLUSH -> must be 1. MRV -> should be 0xFF. + */ + cfg22.u32 = 0; + /* Master Retry Value [1..255] and 0=infinite */ + cfg22.s.mrv = 0xff; + /* + * AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper + * N3K operation. + */ + cfg22.s.flush = 1; + octeon_npi_write32(CVMX_NPI_PCI_CFG22, cfg22.u32); + + /* + * MOST Indicates the maximum number of outstanding splits (in -1 + * notation) when OCTEON is in PCI-X mode. PCI-X performance is + * affected by the MOST selection. Should generally be written + * with one of 0x3be807, 0x2be807, 0x1be807, or 0x0be807, + * depending on the desired MOST of 3, 2, 1, or 0, respectively. + */ + cfg56.u32 = 0; + cfg56.s.pxcid = 7; /* RO - PCI-X Capability ID */ + cfg56.s.ncp = 0xe8; /* RO - Next Capability Pointer */ + cfg56.s.dpere = 1; /* Data Parity Error Recovery Enable */ + cfg56.s.roe = 1; /* Relaxed Ordering Enable */ + cfg56.s.mmbc = 1; /* Maximum Memory Byte Count + [0=512B,1=1024B,2=2048B,3=4096B] */ + cfg56.s.most = 3; /* Maximum outstanding Split transactions [0=1 + .. 7=32] */ + + octeon_npi_write32(CVMX_NPI_PCI_CFG56, cfg56.u32); + + /* + * Affects PCI performance when OCTEON services reads to its + * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are + * 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and + * PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700, + * these values need to be changed so they won't possibly prefetch off + * of the end of memory if PCI is DMAing a buffer at the end of + * memory. Note that these values differ from their reset values. 
+ */ + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_6, 0x21); + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_C, 0x31); + octeon_npi_write32(CVMX_NPI_PCI_READ_CMD_E, 0x31); +} + + +/** + * Initialize the Octeon PCI controller + * + * Returns + */ +static int __init octeon_pci_setup(void) +{ + union cvmx_npi_mem_access_subidx mem_access; + int index; + + /* Only these chips have PCI */ + if (octeon_has_feature(OCTEON_FEATURE_PCIE)) + return 0; + + /* Point pcibios_map_irq() to the PCI version of it */ + octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; + + /* Only use the big bars on chips that support it */ + if (OCTEON_IS_MODEL(OCTEON_CN31XX) || + OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) || + OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)) + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL; + else + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; + + /* PCI I/O and PCI MEM values */ + set_io_port_base(OCTEON_PCI_IOSPACE_BASE); + ioport_resource.start = 0; + ioport_resource.end = OCTEON_PCI_IOSPACE_SIZE - 1; + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + + pr_notice("%s Octeon big bar support\n", + (octeon_dma_bar_type == + OCTEON_DMA_BAR_TYPE_BIG) ? "Enabling" : "Disabling"); + + octeon_pci_initialize(); + + mem_access.u64 = 0; + mem_access.s.esr = 1; /* Endian-Swap on read. */ + mem_access.s.esw = 1; /* Endian-Swap on write. */ + mem_access.s.nsr = 0; /* No-Snoop on read. */ + mem_access.s.nsw = 0; /* No-Snoop on write. */ + mem_access.s.ror = 0; /* Relax Read on read. */ + mem_access.s.row = 0; /* Relax Order on write. */ + mem_access.s.ba = 0; /* PCI Address bits [63:36]. */ + cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, mem_access.u64); + + /* + * Remap the Octeon BAR 2 above all 32 bit devices + * (0x8000000000ul). This is done here so it is remapped + * before the readl()'s below. We don't want BAR2 overlapping + * with BAR0/BAR1 during these reads. + */ + octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80); + + /* Disable the BAR1 movable mappings */ + for (index = 0; index < 32; index++) + octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0); + + if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) { + /* Remap the Octeon BAR 0 to 0-2GB */ + octeon_npi_write32(CVMX_NPI_PCI_CFG04, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0); + + /* + * Remap the Octeon BAR 1 to map 2GB-4GB (minus the + * BAR 1 hole). + */ + octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30); + octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0); + + /* Devices go after BAR1 */ + octeon_pci_mem_resource.start = + OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) - + (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pci_mem_resource.end = + octeon_pci_mem_resource.start + (1ul << 30); + } else { + /* Remap the Octeon BAR 0 to map 128MB-(128MB+4KB) */ + octeon_npi_write32(CVMX_NPI_PCI_CFG04, 128ul << 20); + octeon_npi_write32(CVMX_NPI_PCI_CFG05, 0); + + /* Remap the Octeon BAR 1 to map 0-128MB */ + octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0); + octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0); + + /* Devices go after BAR0 */ + octeon_pci_mem_resource.start = + OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) + + (4ul << 10); + octeon_pci_mem_resource.end = + octeon_pci_mem_resource.start + (1ul << 30); + } + + register_pci_controller(&octeon_pci_controller); + + /* + * Clear any errors that might be pending from before the bus + * was setup properly. 
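octeon_pci_setup() below places the device memory window differently for the two BAR layouts. A standalone sketch that re-derives both windows from the constants defined earlier in the file:

#include <stdio.h>

#define MEMSPACE_OFFSET         0x00011b0000000000ull
#define BAR1_HOLE_BITS          5
#define BAR1_HOLE_SIZE          (1ull << (BAR1_HOLE_BITS + 3))  /* in MB */

int main(void)
{
        unsigned long long start, end;

        /* Big bar: BAR0 decodes 0-2GB, BAR1 2-4GB, devices go above that */
        start = MEMSPACE_OFFSET + (4ull << 30) - (BAR1_HOLE_SIZE << 20);
        end = start + (1ull << 30);
        printf("big bar devices:   %#llx - %#llx\n", start, end);

        /* Small bar: BAR0 is 4KB at 128MB, BAR1 decodes 0-128MB */
        start = MEMSPACE_OFFSET + (128ull << 20) + (4ull << 10);
        end = start + (1ull << 30);
        printf("small bar devices: %#llx - %#llx\n", start, end);
        return 0;
}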
+ */ + cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1); + return 0; +} + +arch_initcall(octeon_pci_setup); diff --git a/arch/mips/cavium-octeon/pcie.c b/arch/mips/cavium-octeon/pcie.c new file mode 100644 index 00000000000..49d14081b3b --- /dev/null +++ b/arch/mips/cavium-octeon/pcie.c @@ -0,0 +1,1370 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007, 2008 Cavium Networks + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/time.h> +#include <linux/delay.h> + +#include <asm/octeon/octeon.h> +#include <asm/octeon/cvmx-npei-defs.h> +#include <asm/octeon/cvmx-pciercx-defs.h> +#include <asm/octeon/cvmx-pescx-defs.h> +#include <asm/octeon/cvmx-pexp-defs.h> +#include <asm/octeon/cvmx-helper-errata.h> + +#include "pci-common.h" + +union cvmx_pcie_address { + uint64_t u64; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 1 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t es:2; /* Endian swap = 1 */ + uint64_t port:2; /* PCIe port 0,1 */ + uint64_t reserved_29_31:3; /* Must be zero */ + /* + * Selects the type of the configuration request (0 = type 0, + * 1 = type 1). + */ + uint64_t ty:1; + /* Target bus number sent in the ID in the request. */ + uint64_t bus:8; + /* + * Target device number sent in the ID in the + * request. Note that Dev must be zero for type 0 + * configuration requests. + */ + uint64_t dev:5; + /* Target function number sent in the ID in the request. */ + uint64_t func:3; + /* + * Selects a register in the configuration space of + * the target. + */ + uint64_t reg:12; + } config; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 2 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t es:2; /* Endian swap = 1 */ + uint64_t port:2; /* PCIe port 0,1 */ + uint64_t address:32; /* PCIe IO address */ + } io; + struct { + uint64_t upper:2; /* Normally 2 for XKPHYS */ + uint64_t reserved_49_61:13; /* Must be zero */ + uint64_t io:1; /* 1 for IO space access */ + uint64_t did:5; /* PCIe DID = 3 */ + uint64_t subdid:3; /* PCIe SubDID = 3-6 */ + uint64_t reserved_36_39:4; /* Must be zero */ + uint64_t address:36; /* PCIe Mem address */ + } mem; +}; + +/** + * Return the Core virtual base address for PCIe IO access. IOs are + * read/written as an offset from this address. 
+ * + * @pcie_port: PCIe port the IO is for + * + * Returns 64bit Octeon IO base address for read/write + */ +static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port) +{ + union cvmx_pcie_address pcie_addr; + pcie_addr.u64 = 0; + pcie_addr.io.upper = 0; + pcie_addr.io.io = 1; + pcie_addr.io.did = 3; + pcie_addr.io.subdid = 2; + pcie_addr.io.es = 1; + pcie_addr.io.port = pcie_port; + return pcie_addr.u64; +} + +/** + * Size of the IO address region returned at address + * cvmx_pcie_get_io_base_address() + * + * @pcie_port: PCIe port the IO is for + * + * Returns Size of the IO window + */ +static inline uint64_t cvmx_pcie_get_io_size(int pcie_port) +{ + return 1ull << 32; +} + +/** + * Return the Core virtual base address for PCIe MEM access. Memory is + * read/written as an offset from this address. + * + * @pcie_port: PCIe port the IO is for + * + * Returns 64bit Octeon IO base address for read/write + */ +static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port) +{ + union cvmx_pcie_address pcie_addr; + pcie_addr.u64 = 0; + pcie_addr.mem.upper = 0; + pcie_addr.mem.io = 1; + pcie_addr.mem.did = 3; + pcie_addr.mem.subdid = 3 + pcie_port; + return pcie_addr.u64; +} + +/** + * Size of the Mem address region returned at address + * cvmx_pcie_get_mem_base_address() + * + * @pcie_port: PCIe port the IO is for + * + * Returns Size of the Mem window + */ +static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port) +{ + return 1ull << 36; +} + +/** + * Read a PCIe config space register indirectly. This is used for + * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. + * + * @pcie_port: PCIe port to read from + * @cfg_offset: Address to read + * + * Returns Value read + */ +static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset) +{ + union cvmx_pescx_cfg_rd pescx_cfg_rd; + pescx_cfg_rd.u64 = 0; + pescx_cfg_rd.s.addr = cfg_offset; + cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64); + pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port)); + return pescx_cfg_rd.s.data; +} + +/** + * Write a PCIe config space register indirectly. This is used for + * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???. 
+ * + * @pcie_port: PCIe port to write to + * @cfg_offset: Address to write + * @val: Value to write + */ +static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, + uint32_t val) +{ + union cvmx_pescx_cfg_wr pescx_cfg_wr; + pescx_cfg_wr.u64 = 0; + pescx_cfg_wr.s.addr = cfg_offset; + pescx_cfg_wr.s.data = val; + cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64); +} + +/** + * Build a PCIe config space request address for a device + * + * @pcie_port: PCIe port to access + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns 64bit Octeon IO address + */ +static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, + int dev, int fn, int reg) +{ + union cvmx_pcie_address pcie_addr; + union cvmx_pciercx_cfg006 pciercx_cfg006; + + pciercx_cfg006.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port)); + if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0)) + return 0; + + pcie_addr.u64 = 0; + pcie_addr.config.upper = 2; + pcie_addr.config.io = 1; + pcie_addr.config.did = 3; + pcie_addr.config.subdid = 1; + pcie_addr.config.es = 1; + pcie_addr.config.port = pcie_port; + pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum); + pcie_addr.config.bus = bus; + pcie_addr.config.dev = dev; + pcie_addr.config.func = fn; + pcie_addr.config.reg = reg; + return pcie_addr.u64; +} + +/** + * Read 8bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return cvmx_read64_uint8(address); + else + return 0xff; +} + +/** + * Read 16bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return le16_to_cpu(cvmx_read64_uint16(address)); + else + return 0xffff; +} + +/** + * Read 32bits from a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * + * Returns Result of the read + */ +static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, + int fn, int reg) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + return le32_to_cpu(cvmx_read64_uint32(address)); + else + return 0xffffffff; +} + +/** + * Write 8bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, + int reg, uint8_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint8(address, val); +} + +/** + * Write 16bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to 
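__cvmx_pcie_build_config_addr() above encodes the type-0 versus type-1 decision in the ty bit and refuses to build an address for nonzero device numbers on the secondary bus; the read helpers then return all ones for such addresses, the conventional "no device" value. A standalone sketch of that routing logic, with PBNUM fixed to 1 as in the config-space init later in the file:

#include <stdint.h>
#include <stdio.h>

#define PBNUM   1               /* primary bus number, as programmed in CFG006 */

static int config_type(int bus, int dev, int *valid)
{
        *valid = !(bus <= PBNUM && dev != 0);
        return bus > PBNUM;     /* 0 = type 0, 1 = type 1 */
}

static uint32_t config_read32(int bus, int dev)
{
        int valid;
        int ty = config_type(bus, dev, &valid);

        if (!valid)
                return 0xffffffff;      /* nothing there */
        printf("bus %d dev %d -> type %d request\n", bus, dev, ty);
        return 0;               /* a real read would go to the hardware */
}

int main(void)
{
        config_read32(1, 0);            /* link partner: type 0 */
        config_read32(2, 3);            /* behind a bridge: type 1 */
        printf("bus 1 dev 5 read: %#x\n", (unsigned)config_read32(1, 5));
        return 0;
}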
access + * @val: Value to write + */ +static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, + int reg, uint16_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint16(address, cpu_to_le16(val)); +} + +/** + * Write 32bits to a Device's config space + * + * @pcie_port: PCIe port the device is on + * @bus: Sub bus + * @dev: Device ID + * @fn: Device sub function + * @reg: Register to access + * @val: Value to write + */ +static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, + int reg, uint32_t val) +{ + uint64_t address = + __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg); + if (address) + cvmx_write64_uint32(address, cpu_to_le32(val)); +} + +/** + * Initialize the RC config space CSRs + * + * @pcie_port: PCIe port to initialize + */ +static void __cvmx_pcie_rc_initialize_config_space(int pcie_port) +{ + union cvmx_pciercx_cfg030 pciercx_cfg030; + union cvmx_npei_ctl_status2 npei_ctl_status2; + union cvmx_pciercx_cfg070 pciercx_cfg070; + union cvmx_pciercx_cfg001 pciercx_cfg001; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg006 pciercx_cfg006; + union cvmx_pciercx_cfg008 pciercx_cfg008; + union cvmx_pciercx_cfg009 pciercx_cfg009; + union cvmx_pciercx_cfg010 pciercx_cfg010; + union cvmx_pciercx_cfg011 pciercx_cfg011; + union cvmx_pciercx_cfg035 pciercx_cfg035; + union cvmx_pciercx_cfg075 pciercx_cfg075; + union cvmx_pciercx_cfg034 pciercx_cfg034; + + /* Max Payload Size (PCIE*_CFG030[MPS]) */ + /* Max Read Request Size (PCIE*_CFG030[MRRS]) */ + /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */ + /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */ + pciercx_cfg030.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port)); + /* + * Max payload size = 128 bytes for best Octeon DMA + * performance. + */ + pciercx_cfg030.s.mps = 0; + /* + * Max read request size = 128 bytes for best Octeon DMA + * performance. + */ + pciercx_cfg030.s.mrrs = 0; + /* Enable relaxed ordering. */ + pciercx_cfg030.s.ro_en = 1; + /* Enable no snoop. */ + pciercx_cfg030.s.ns_en = 1; + /* Correctable error reporting enable. */ + pciercx_cfg030.s.ce_en = 1; + /* Non-fatal error reporting enable. */ + pciercx_cfg030.s.nfe_en = 1; + /* Fatal error reporting enable. */ + pciercx_cfg030.s.fe_en = 1; + /* Unsupported request reporting enable. */ + pciercx_cfg030.s.ur_en = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), + pciercx_cfg030.u32); + + /* + * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match + * PCIE*_CFG030[MPS] + * + * Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not + * exceed PCIE*_CFG030[MRRS]. + */ + npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2); + /* Max payload size = 128 bytes for best Octeon DMA performance */ + npei_ctl_status2.s.mps = 0; + /* Max read request size = 128 bytes for best Octeon DMA performance */ + npei_ctl_status2.s.mrrs = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64); + + /* ECRC Generation (PCIE*_CFG070[GE,CE]) */ + pciercx_cfg070.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port)); + pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */ + pciercx_cfg070.s.ce = 1; /* ECRC check enable. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), + pciercx_cfg070.u32); + + /* + * Access Enables (PCIE*_CFG001[MSAE,ME]) ME and MSAE should + * always be set. 
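The MPS and MRRS fields written further down are log2 encodings, so the value 0 selects 128-byte payloads and 128-byte read requests (128 << n bytes in general). A standalone sketch of the encoding:

#include <stdio.h>

int main(void)
{
        int n;

        for (n = 0; n <= 5; n++)
                printf("encoding %d -> %4d byte max payload / read request\n",
                       n, 128 << n);
        return 0;
}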
+ * + * Interrupt Disable (PCIE*_CFG001[I_DIS]) System Error + * Message Enable (PCIE*_CFG001[SEE]) + */ + pciercx_cfg001.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port)); + pciercx_cfg001.s.msae = 1; /* Memory space enable. */ + pciercx_cfg001.s.me = 1; /* Bus master enable. */ + pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */ + pciercx_cfg001.s.see = 1; /* SERR# enable */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), + pciercx_cfg001.u32); + + /* Advanced Error Recovery Message Enables */ + /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0); + /* Use CVMX_PCIERCX_CFG067 hardware default */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0); + + /* Active State Power Management (PCIE*_CFG032[ASLPC]) */ + pciercx_cfg032.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port)); + pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */ + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), + pciercx_cfg032.u32); + + /* Entrance Latencies (PCIE*_CFG451[L0EL,L1EL]) */ + + /* + * Link Width Mode (PCIERCn_CFG452[LME]) - Set during + * cvmx_pcie_rc_initialize_link() + * + * Primary Bus Number (PCIERCn_CFG006[PBNUM]) + * + * We set the primary bus number to 1 so IDT bridges are + * happy. They don't like zero. + */ + pciercx_cfg006.u32 = 0; + pciercx_cfg006.s.pbnum = 1; + pciercx_cfg006.s.sbnum = 1; + pciercx_cfg006.s.subbnum = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), + pciercx_cfg006.u32); + + /* + * Memory-mapped I/O BAR (PCIERCn_CFG008) + * Most applications should disable the memory-mapped I/O BAR by + * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] + */ + pciercx_cfg008.u32 = 0; + pciercx_cfg008.s.mb_addr = 0x100; + pciercx_cfg008.s.ml_addr = 0; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), + pciercx_cfg008.u32); + + /* + * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) + * Most applications should disable the prefetchable BAR by setting + * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < + * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] + */ + pciercx_cfg009.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port)); + pciercx_cfg010.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port)); + pciercx_cfg011.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port)); + pciercx_cfg009.s.lmem_base = 0x100; + pciercx_cfg009.s.lmem_limit = 0; + pciercx_cfg010.s.umem_base = 0x100; + pciercx_cfg011.s.umem_limit = 0; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), + pciercx_cfg009.u32); + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), + pciercx_cfg010.u32); + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), + pciercx_cfg011.u32); + + /* + * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) + * PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) + */ + pciercx_cfg035.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port)); + /* System error on correctable error enable. */ + pciercx_cfg035.s.secee = 1; + /* System error on fatal error enable. */ + pciercx_cfg035.s.sefee = 1; + /* System error on non-fatal error enable. */ + pciercx_cfg035.s.senfee = 1; + /* PME interrupt enable. 
*/ + pciercx_cfg035.s.pmeie = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), + pciercx_cfg035.u32); + + /* + * Advanced Error Recovery Interrupt Enables + * (PCIERCn_CFG075[CERE,NFERE,FERE]) + */ + pciercx_cfg075.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port)); + /* Correctable error reporting enable. */ + pciercx_cfg075.s.cere = 1; + /* Non-fatal error reporting enable. */ + pciercx_cfg075.s.nfere = 1; + /* Fatal error reporting enable. */ + pciercx_cfg075.s.fere = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), + pciercx_cfg075.u32); + + /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], + * PCIERCn_CFG034[DLLS_EN,CCINT_EN]) + */ + pciercx_cfg034.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port)); + /* Hot-plug interrupt enable. */ + pciercx_cfg034.s.hpint_en = 1; + /* Data Link Layer state changed enable */ + pciercx_cfg034.s.dlls_en = 1; + /* Command completed interrupt enable. */ + pciercx_cfg034.s.ccint_en = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), + pciercx_cfg034.u32); +} + +/** + * Initialize a host mode PCIe link. This function takes a PCIe + * port from reset to a link up state. Software can then begin + * configuring the rest of the link. + * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int __cvmx_pcie_rc_initialize_link(int pcie_port) +{ + uint64_t start_cycle; + union cvmx_pescx_ctl_status pescx_ctl_status; + union cvmx_pciercx_cfg452 pciercx_cfg452; + union cvmx_pciercx_cfg032 pciercx_cfg032; + union cvmx_pciercx_cfg448 pciercx_cfg448; + + /* Set the lane width */ + pciercx_cfg452.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port)); + pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); + if (pescx_ctl_status.s.qlm_cfg == 0) { + /* We're in 8 lane (56XX) or 4 lane (54XX) mode */ + pciercx_cfg452.s.lme = 0xf; + } else { + /* We're in 4 lane (56XX) or 2 lane (52XX) mode */ + pciercx_cfg452.s.lme = 0x7; + } + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), + pciercx_cfg452.u32); + + /* + * CN52XX pass 1.x has an errata where length mismatches on UR + * responses can cause bus errors on 64bit memory + * reads. Turning off length error checking fixes this. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + union cvmx_pciercx_cfg455 pciercx_cfg455; + pciercx_cfg455.u32 = + cvmx_pcie_cfgx_read(pcie_port, + CVMX_PCIERCX_CFG455(pcie_port)); + pciercx_cfg455.s.m_cpl_len_err = 1; + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), + pciercx_cfg455.u32); + } + + /* Lane swap needs to be manually enabled for CN52XX */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) { + pescx_ctl_status.s.lane_swp = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), + pescx_ctl_status.u64); + } + + /* Bring up the link */ + pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port)); + pescx_ctl_status.s.lnk_enb = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64); + + /* + * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to + * be disabled. 
+ */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0)) + __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0); + + /* Wait for the link to come up */ + cvmx_dprintf("PCIe: Waiting for port %d link\n", pcie_port); + start_cycle = cvmx_get_cycle(); + do { + if (cvmx_get_cycle() - start_cycle > + 2 * cvmx_sysinfo_get()->cpu_clock_hz) { + cvmx_dprintf("PCIe: Port %d link timeout\n", + pcie_port); + return -1; + } + cvmx_wait(10000); + pciercx_cfg032.u32 = + cvmx_pcie_cfgx_read(pcie_port, + CVMX_PCIERCX_CFG032(pcie_port)); + } while (pciercx_cfg032.s.dlla == 0); + + /* Display the link status */ + cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, + pciercx_cfg032.s.nlw); + + /* + * Update the Replay Time Limit. Empirically, some PCIe + * devices take a little longer to respond than expected under + * load. As a workaround for this we configure the Replay Time + * Limit to the value expected for a 512 byte MPS instead of + * our actual 256 byte MPS. The numbers below are directly + * from the PCIe spec table 3-4. + */ + pciercx_cfg448.u32 = + cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port)); + switch (pciercx_cfg032.s.nlw) { + case 1: /* 1 lane */ + pciercx_cfg448.s.rtl = 1677; + break; + case 2: /* 2 lanes */ + pciercx_cfg448.s.rtl = 867; + break; + case 4: /* 4 lanes */ + pciercx_cfg448.s.rtl = 462; + break; + case 8: /* 8 lanes */ + pciercx_cfg448.s.rtl = 258; + break; + } + cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), + pciercx_cfg448.u32); + + return 0; +} + +/** + * Initialize a PCIe port for use in host(RC) mode. It doesn't + * enumerate the bus. + * + * @pcie_port: PCIe port to initialize + * + * Returns Zero on success + */ +static int cvmx_pcie_rc_initialize(int pcie_port) +{ + int i; + union cvmx_ciu_soft_prst ciu_soft_prst; + union cvmx_pescx_bist_status pescx_bist_status; + union cvmx_pescx_bist_status2 pescx_bist_status2; + union cvmx_npei_ctl_status npei_ctl_status; + union cvmx_npei_mem_access_ctl npei_mem_access_ctl; + union cvmx_npei_mem_access_subidx mem_access_subid; + union cvmx_npei_dbg_data npei_dbg_data; + union cvmx_pescx_ctl_status2 pescx_ctl_status2; + + /* + * Make sure we aren't trying to setup a target mode interface + * in host mode. + */ + npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); + if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) { + cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called " + "on port0, but port0 is not in host mode\n"); + return -1; + } + + /* + * Make sure a CN52XX isn't trying to bring up port 1 when it + * is disabled. + */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { + npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) { + cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() " + "called on port1, but port1 is " + "disabled\n"); + return -1; + } + } + + /* + * PCIe switch arbitration mode. '0' == fixed priority NPEI, + * PCIe0, then PCIe1. '1' == round robin. + */ + npei_ctl_status.s.arb = 1; + /* Allow up to 0x20 config retries */ + npei_ctl_status.s.cfg_rtry = 0x20; + /* + * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS + * don't reset. 
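The switch below programs the Replay Time Limit from the negotiated link width, using the 512-byte-MPS row of PCIe spec table 3-4 as a safety margin. A standalone sketch of the lookup:

#include <stdio.h>

static int replay_time_limit(int lanes)
{
        switch (lanes) {
        case 1:
                return 1677;
        case 2:
                return 867;
        case 4:
                return 462;
        case 8:
                return 258;
        default:
                return 0;       /* unexpected width: leave the register alone */
        }
}

int main(void)
{
        int widths[] = { 1, 2, 4, 8 };
        unsigned int i;

        for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
                printf("x%d link -> replay time limit %d\n",
                       widths[i], replay_time_limit(widths[i]));
        return 0;
}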
+ */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + npei_ctl_status.s.p0_ntags = 0x20; + npei_ctl_status.s.p1_ntags = 0x20; + } + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64); + + /* Bring the PCIe out of reset */ + if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) { + /* + * The EBH5200 board swapped the PCIe reset lines on + * the board. As a workaround for this bug, we bring + * both PCIe ports out of reset at the same time + * instead of on separate calls. So for port 0, we + * bring both out of reset and do nothing on port 1. + */ + if (pcie_port == 0) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in + * reset. If it isn't, most likely someone is + * trying to init it again without a proper + * PCIe reset. + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the ports */ + ciu_soft_prst.s.soft_prst = 1; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, + ciu_soft_prst.u64); + ciu_soft_prst.u64 = + cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 1; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, + ciu_soft_prst.u64); + /* Wait until pcie resets the ports. */ + udelay(2000); + } + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + } else { + /* + * The normal case: The PCIe ports are completely + * separate and can be brought out of reset + * independently. + */ + if (pcie_port) + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + else + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + /* + * After a chip reset the PCIe will also be in + * reset. If it isn't, most likely someone is trying + * to init it again without a proper PCIe reset. + */ + if (ciu_soft_prst.s.soft_prst == 0) { + /* Reset the port */ + ciu_soft_prst.s.soft_prst = 1; + if (pcie_port) + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, + ciu_soft_prst.u64); + else + cvmx_write_csr(CVMX_CIU_SOFT_PRST, + ciu_soft_prst.u64); + /* Wait until pcie resets the ports. */ + udelay(2000); + } + if (pcie_port) { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64); + } else { + ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST); + ciu_soft_prst.s.soft_prst = 0; + cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64); + } + } + + /* + * Wait for PCIe reset to complete. Due to errata PCIE-700, we + * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a + * fixed number of cycles. + */ + cvmx_wait(400000); + + /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and + CN52XX, so we only probe it on newer chips */ + if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* Clear PCLK_RUN so we can check if the clock is running */ + pescx_ctl_status2.u64 = + cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); + pescx_ctl_status2.s.pclk_run = 1; + cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), + pescx_ctl_status2.u64); + /* + * Now that we cleared PCLK_RUN, wait for it to be set + * again telling us the clock is running. 
+ */ + if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port), + union cvmx_pescx_ctl_status2, + pclk_run, ==, 1, 10000)) { + cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", + pcie_port); + return -1; + } + } + + /* + * Check and make sure PCIe came out of reset. If it doesn't + * the board probably hasn't wired the clocks up and the + * interface should be skipped. + */ + pescx_ctl_status2.u64 = + cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port)); + if (pescx_ctl_status2.s.pcierst) { + cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", + pcie_port); + return -1; + } + + /* + * Check BIST2 status. If any bits are set, skip this interface. This + * is an attempt to catch PCIE-813 on pass 1 parts. + */ + pescx_bist_status2.u64 = + cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port)); + if (pescx_bist_status2.u64) { + cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this " + "port isn't hooked up, skipping.\n", + pcie_port); + return -1; + } + + /* Check BIST status */ + pescx_bist_status.u64 = + cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port)); + if (pescx_bist_status.u64) + cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", + pcie_port, CAST64(pescx_bist_status.u64)); + + /* Initialize the config space CSRs */ + __cvmx_pcie_rc_initialize_config_space(pcie_port); + + /* Bring the link up */ + if (__cvmx_pcie_rc_initialize_link(pcie_port)) { + cvmx_dprintf + ("PCIe: ERROR: cvmx_pcie_rc_initialize_link() failed\n"); + return -1; + } + + /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */ + npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL); + /* Allow 16 words to combine */ + npei_mem_access_ctl.s.max_word = 0; + /* Wait up to 127 cycles for more data */ + npei_mem_access_ctl.s.timer = 127; + cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64); + + /* Setup Mem access SubDIDs */ + mem_access_subid.u64 = 0; + /* Port the request is sent to. */ + mem_access_subid.s.port = pcie_port; + /* Due to an errata on pass 1 chips, no merging is allowed. */ + mem_access_subid.s.nmerge = 1; + /* Endian-swap for Reads. */ + mem_access_subid.s.esr = 1; + /* Endian-swap for Writes. */ + mem_access_subid.s.esw = 1; + /* No Snoop for Reads. */ + mem_access_subid.s.nsr = 1; + /* No Snoop for Writes. */ + mem_access_subid.s.nsw = 1; + /* Disable Relaxed Ordering for Reads. */ + mem_access_subid.s.ror = 0; + /* Disable Relaxed Ordering for Writes. */ + mem_access_subid.s.row = 0; + /* PCIe Address Bits <63:34>. */ + mem_access_subid.s.ba = 0; + + /* + * Setup mem access 12-15 for port 0, 16-19 for port 1, + * supplying 36 bits of address space. + */ + for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) { + cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), + mem_access_subid.u64); + /* Set each SUBID to extend the addressable range */ + mem_access_subid.s.ba += 1; + } + + /* + * Disable the peer to peer forwarding register. This must be + * setup by the OS after it enumerates the bus and assigns + * addresses to the PCIe busses. + */ + for (i = 0; i < 4; i++) { + cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1); + cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1); + } + + /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0); + + /* + * Disable Octeon's BAR1. It isn't needed in RC mode since + * BAR2 maps all of memory. BAR2 also maps 256MB-512MB into + * the 2nd 256MB of memory. 
+ */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), -1); + + /* + * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take + * precedence where they overlap. It also overlaps with the + * device addresses, so make sure the peer to peer forwarding + * is set right. + */ + cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0); + + /* + * Setup BAR2 attributes + * + * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) + * - PTLP_RO,CTLP_RO should normally be set (except for debug). + * - WAIT_COM=0 will likely work for all applications. + * + * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]). + */ + if (pcie_port) { + union cvmx_npei_ctl_port1 npei_ctl_port; + npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1); + npei_ctl_port.s.bar2_enb = 1; + npei_ctl_port.s.bar2_esx = 1; + npei_ctl_port.s.bar2_cax = 0; + npei_ctl_port.s.ptlp_ro = 1; + npei_ctl_port.s.ctlp_ro = 1; + npei_ctl_port.s.wait_com = 0; + npei_ctl_port.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64); + } else { + union cvmx_npei_ctl_port0 npei_ctl_port; + npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0); + npei_ctl_port.s.bar2_enb = 1; + npei_ctl_port.s.bar2_esx = 1; + npei_ctl_port.s.bar2_cax = 0; + npei_ctl_port.s.ptlp_ro = 1; + npei_ctl_port.s.ctlp_ro = 1; + npei_ctl_port.s.wait_com = 0; + npei_ctl_port.s.waitl_com = 0; + cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64); + } + return 0; +} + + +/* Above was cvmx-pcie.c, below original pcie.c */ + + +/** + * Map a PCI device to the appropriate interrupt line + * + * @param dev The Linux PCI device structure for the device to map + * @param slot The slot number for this device on __BUS 0__. Linux + * enumerates through all the bridges and figures out the + * slot on Bus 0 where this device eventually hooks to. + * @param pin The PCI interrupt pin read from the device, then swizzled + * as it goes through each bridge. + * @return Interrupt number for the device + */ +int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, + u8 slot, u8 pin) +{ + /* + * The EBH5600 board with the PCI to PCIe bridge mistakenly + * wires the first slot for both device id 2 and interrupt + * A. According to the PCI spec, device id 2 should be C. The + * following kludge attempts to fix this. + */ + if (strstr(octeon_board_type_string(), "EBH5600") && + dev->bus && dev->bus->parent) { + /* + * Iterate all the way up the device chain and find + * the root bus. + */ + while (dev->bus && dev->bus->parent) + dev = to_pci_dev(dev->bus->bridge); + /* If the root bus is number 0 and the PEX 8114 is the + * root, assume we are behind the miswired bus. We + * need to correct the swizzle level by two. Yuck. + */ + if ((dev->bus->number == 0) && + (dev->vendor == 0x10b5) && (dev->device == 0x8114)) { + /* + * The pin field is one based, not zero. We + * need to swizzle it by minus two. + */ + pin = ((pin - 3) & 3) + 1; + } + } + /* + * The -1 is because pin starts with one, not zero. It might + * be that this equation needs to include the slot number, but + * I don't have hardware to check that against. 
+ */ + return pin - 1 + OCTEON_IRQ_PCI_INT0; +} + +/** + * Read a value from configuration space + * + * @param bus + * @param devfn + * @param reg + * @param size + * @param val + * @return + */ +static inline int octeon_pcie_read_config(int pcie_port, struct pci_bus *bus, + unsigned int devfn, int reg, int size, + u32 *val) +{ + union octeon_cvmemctl cvmmemctl; + union octeon_cvmemctl cvmmemctl_save; + int bus_number = bus->number; + + /* + * We need to force the bus number to be zero on the root + * bus. Linux numbers the 2nd root bus to start after all + * buses on root 0. + */ + if (bus->parent == NULL) + bus_number = 0; + + /* + * PCIe only has a single device connected to Octeon. It is + * always device ID 0. Don't bother doing reads for other + * device IDs on the first segment. + */ + if ((bus_number == 0) && (devfn >> 3 != 0)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + + /* + * The following is a workaround for the CN57XX, CN56XX, + * CN55XX, and CN54XX errata with PCIe config reads from + * non-existent devices. These chips will hang the PCIe link if a + * config read is performed that causes a UR response. + */ + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) { + /* + * For our EBH5600 board, port 0 has a bridge with two + * PCI-X slots. We need special checks to make + * sure we only probe valid stuff. The PCIe->PCI-X + * bridge only responds to device ID 0, functions + * 0-1. + */ + if ((bus_number == 0) && (devfn >= 2)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + /* + * The PCI-X slots are device ID 2,3. Choose one of + * the below "if" blocks based on what is plugged into + * the board. + */ +#if 1 + /* Use this option if you aren't using either slot */ + if (bus_number == 1) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* + * Use this option if you are using the first slot but + * not the second. + */ + if ((bus_number == 1) && (devfn >> 3 != 2)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* + * Use this option if you are using the second slot + * but not the first. + */ + if ((bus_number == 1) && (devfn >> 3 != 3)) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#elif 0 + /* Use this option if you are using both slots */ + if ((bus_number == 1) && + !((devfn == (2 << 3)) || (devfn == (3 << 3)))) + return PCIBIOS_FUNC_NOT_SUPPORTED; +#endif + + /* + * Shorten the DID timeout so bus errors for PCIe + * config reads from non-existent devices happen + * faster. This allows us to continue booting even if + * the above "if" checks are wrong. Once one of these + * errors happens, the PCIe port is dead. 
+ */ + cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7); + cvmmemctl.u64 = cvmmemctl_save.u64; + cvmmemctl.s.didtto = 2; + __write_64bit_c0_register($11, 7, cvmmemctl.u64); + } + + switch (size) { + case 4: + *val = cvmx_pcie_config_read32(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + case 2: + *val = cvmx_pcie_config_read16(pcie_port, bus_number, + devfn >> 3, devfn & 0x7, reg); + break; + case 1: + *val = cvmx_pcie_config_read8(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg); + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) || + OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) + __write_64bit_c0_register($11, 7, cvmmemctl_save.u64); + return PCIBIOS_SUCCESSFUL; +} + +static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return octeon_pcie_read_config(0, bus, devfn, reg, size, val); +} + +static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + return octeon_pcie_read_config(1, bus, devfn, reg, size, val); +} + + + +/** + * Write a value to PCI configuration space + * + * @param bus + * @param devfn + * @param reg + * @param size + * @param val + * @return + */ +static inline int octeon_pcie_write_config(int pcie_port, struct pci_bus *bus, + unsigned int devfn, int reg, + int size, u32 val) +{ + int bus_number = bus->number; + /* + * We need to force the bus number to be zero on the root + * bus. Linux numbers the 2nd root bus to start after all + * busses on root 0. + */ + if (bus->parent == NULL) + bus_number = 0; + + switch (size) { + case 4: + cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + return PCIBIOS_SUCCESSFUL; + case 2: + cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + return PCIBIOS_SUCCESSFUL; + case 1: + cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3, + devfn & 0x7, reg, val); + return PCIBIOS_SUCCESSFUL; + } +#if PCI_CONFIG_SPACE_DELAY + udelay(PCI_CONFIG_SPACE_DELAY); +#endif + return PCIBIOS_FUNC_NOT_SUPPORTED; +} + +static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return octeon_pcie_write_config(0, bus, devfn, reg, size, val); +} + +static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + return octeon_pcie_write_config(1, bus, devfn, reg, size, val); +} + +static struct pci_ops octeon_pcie0_ops = { + octeon_pcie0_read_config, + octeon_pcie0_write_config, +}; + +static struct resource octeon_pcie0_mem_resource = { + .name = "Octeon PCIe0 MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_pcie0_io_resource = { + .name = "Octeon PCIe0 IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pcie0_controller = { + .pci_ops = &octeon_pcie0_ops, + .mem_resource = &octeon_pcie0_mem_resource, + .io_resource = &octeon_pcie0_io_resource, +}; + +static struct pci_ops octeon_pcie1_ops = { + octeon_pcie1_read_config, + octeon_pcie1_write_config, +}; + +static struct resource octeon_pcie1_mem_resource = { + .name = "Octeon PCIe1 MEM", + .flags = IORESOURCE_MEM, +}; + +static struct resource octeon_pcie1_io_resource = { + .name = "Octeon PCIe1 IO", + .flags = IORESOURCE_IO, +}; + +static struct pci_controller octeon_pcie1_controller = { + .pci_ops = &octeon_pcie1_ops, + .mem_resource = &octeon_pcie1_mem_resource, + .io_resource = 
&octeon_pcie1_io_resource, +}; + + +/** + * Initialize the Octeon PCIe controllers + * + * @return + */ +static int __init octeon_pcie_setup(void) +{ + union cvmx_npei_ctl_status npei_ctl_status; + int result; + + /* These chips don't have PCIe */ + if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) + return 0; + + /* Point pcibios_map_irq() to the PCIe version of it */ + octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq; + + /* Use the PCIe based DMA mappings */ + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE; + + /* + * PCIe I/O range. It is based on port 0 but includes up until + * port 1's end. + */ + set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0))); + ioport_resource.start = 0; + ioport_resource.end = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1; + + npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS); + if (npei_ctl_status.s.host_mode) { + pr_notice("PCIe: Initializing port 0\n"); + result = cvmx_pcie_rc_initialize(0); + if (result == 0) { + /* Memory offsets are physical addresses */ + octeon_pcie0_controller.mem_offset = + cvmx_pcie_get_mem_base_address(0); + /* IO offsets are Mips virtual addresses */ + octeon_pcie0_controller.io_map_base = + CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address + (0)); + octeon_pcie0_controller.io_offset = 0; + /* + * To keep things similar to PCI, we start + * device addresses at the same place as PCI + * using big bar support. This normally + * translates to 4GB-256MB, which is the same + * as most x86 PCs. + */ + octeon_pcie0_controller.mem_resource->start = + cvmx_pcie_get_mem_base_address(0) + + (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pcie0_controller.mem_resource->end = + cvmx_pcie_get_mem_base_address(0) + + cvmx_pcie_get_mem_size(0) - 1; + /* + * Ports must be above 16KB for the ISA bus + * filtering in the PCI-X to PCI bridge. + */ + octeon_pcie0_controller.io_resource->start = 4 << 10; + octeon_pcie0_controller.io_resource->end = + cvmx_pcie_get_io_size(0) - 1; + register_pci_controller(&octeon_pcie0_controller); + } + } else { + pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n"); + } + + /* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */ + if (OCTEON_IS_MODEL(OCTEON_CN52XX)) { + union cvmx_npei_dbg_data npei_dbg_data; + npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA); + if (npei_dbg_data.cn52xx.qlm0_link_width) + return 0; + } + + pr_notice("PCIe: Initializing port 1\n"); + result = cvmx_pcie_rc_initialize(1); + if (result == 0) { + /* Memory offsets are physical addresses */ + octeon_pcie1_controller.mem_offset = + cvmx_pcie_get_mem_base_address(1); + /* IO offsets are Mips virtual addresses */ + octeon_pcie1_controller.io_map_base = + CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(1)); + octeon_pcie1_controller.io_offset = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0); + /* + * To keep things similar to PCI, we start device + * addresses at the same place as PCI using big bar + * support. This normally translates to 4GB-256MB, + * which is the same as most x86 PCs. + */ + octeon_pcie1_controller.mem_resource->start = + cvmx_pcie_get_mem_base_address(1) + (4ul << 30) - + (OCTEON_PCI_BAR1_HOLE_SIZE << 20); + octeon_pcie1_controller.mem_resource->end = + cvmx_pcie_get_mem_base_address(1) + + cvmx_pcie_get_mem_size(1) - 1; + /* + * Ports must be above 16KB for the ISA bus filtering + * in the PCI-X to PCI bridge. 
+ */ + octeon_pcie1_controller.io_resource->start = + cvmx_pcie_get_io_base_address(1) - + cvmx_pcie_get_io_base_address(0); + octeon_pcie1_controller.io_resource->end = + octeon_pcie1_controller.io_resource->start + + cvmx_pcie_get_io_size(1) - 1; + register_pci_controller(&octeon_pcie1_controller); + } + return 0; +} + +arch_initcall(octeon_pcie_setup); diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index 783da855a2e..d6d35b2e5fe 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -963,7 +963,7 @@ CONFIG_EEPROM_LEGACY=y CONFIG_SENSORS_PCF8574=y # CONFIG_PCF8575 is not set CONFIG_SENSORS_PCF8591=y -CONFIG_SENSORS_MAX6875=y +CONFIG_EEPROM_MAX6875=y # CONFIG_SENSORS_TSL2550 is not set CONFIG_I2C_DEBUG_CORE=y CONFIG_I2C_DEBUG_ALGO=y diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 8426d3b9501..fadb351d249 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -1849,7 +1849,7 @@ CONFIG_EEPROM_LEGACY=m CONFIG_SENSORS_PCF8574=m CONFIG_SENSORS_PCA9539=m CONFIG_SENSORS_PCF8591=m -CONFIG_SENSORS_MAX6875=m +CONFIG_EEPROM_MAX6875=m # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index c0047f86133..8ab1d12ba7f 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -147,6 +147,10 @@ #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ cpu_has_mips64r1 | cpu_has_mips64r2) +#ifndef cpu_has_mips_r2_exec_hazard +#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 +#endif + /* * MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other * pre-MIPS32/MIPS53 processors have CLO, CLZ. For 64-bit kernels @@ -230,4 +234,8 @@ #define cpu_scache_line_size() cpu_data[0].scache.linesz #endif +#ifndef cpu_hwrena_impl_bits +#define cpu_hwrena_impl_bits 0 +#endif + #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/mips/include/asm/delay.h b/arch/mips/include/asm/delay.h index a07e51b2be1..d2d8949be6b 100644 --- a/arch/mips/include/asm/delay.h +++ b/arch/mips/include/asm/delay.h @@ -15,7 +15,7 @@ extern void __delay(unsigned int loops); extern void __ndelay(unsigned int ns); extern void __udelay(unsigned int us); -#define ndelay(ns) __udelay(ns) +#define ndelay(ns) __ndelay(ns) #define udelay(us) __udelay(us) /* make sure "usecs *= ..." in udelay do not overflow. */ diff --git a/arch/mips/include/asm/errno.h b/arch/mips/include/asm/errno.h index 3c0d840e457..a0efc73819e 100644 --- a/arch/mips/include/asm/errno.h +++ b/arch/mips/include/asm/errno.h @@ -119,6 +119,8 @@ #define EOWNERDEAD 165 /* Owner died */ #define ENOTRECOVERABLE 166 /* State not recoverable */ +#define ERFKILL 167 /* Operation not possible due to RF-kill */ + #define EDQUOT 1133 /* Quota exceeded */ #ifdef __KERNEL__ diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h new file mode 100644 index 00000000000..f5e85601532 --- /dev/null +++ b/arch/mips/include/asm/hugetlb.h @@ -0,0 +1,114 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008, 2009 Cavium Networks, Inc. 
+ */ + +#ifndef __ASM_HUGETLB_H +#define __ASM_HUGETLB_H + +#include <asm/page.h> + + +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, + unsigned long len) +{ + return 0; +} + +static inline int prepare_hugepage_range(struct file *file, + unsigned long addr, + unsigned long len) +{ + unsigned long task_size = STACK_TOP; + struct hstate *h = hstate_file(file); + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (addr & ~huge_page_mask(h)) + return -EINVAL; + if (len > task_size) + return -ENOMEM; + if (task_size - len < addr) + return -EINVAL; + return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, + unsigned long addr, + unsigned long end, + unsigned long floor, + unsigned long ceiling) +{ + free_pgd_range(tlb, addr, end, floor, ceiling); +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + set_pte_at(mm, addr, ptep, pte); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t clear; + pte_t pte = *ptep; + + pte_val(clear) = (unsigned long)invalid_pte_table; + set_pte_at(mm, addr, ptep, clear); + return pte; +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ + unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL; + return !val || (val == (unsigned long)invalid_pte_table); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ + return pte_wrprotect(pte); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + ptep_set_wrprotect(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, + pte_t *ptep, pte_t pte, + int dirty) +{ + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +static inline pte_t huge_ptep_get(pte_t *ptep) +{ + return *ptep; +} + +static inline int arch_prepare_hugepage(struct page *page) +{ + return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +#endif /* __ASM_HUGETLB_H */ diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h index 5dabc870b32..032ca73f181 100644 --- a/arch/mips/include/asm/i8253.h +++ b/arch/mips/include/asm/i8253.h @@ -12,8 +12,6 @@ #define PIT_CH0 0x40 #define PIT_CH2 0x42 -#define PIT_TICK_RATE 1193182UL - extern spinlock_t i8253_lock; extern void setup_pit_timer(void); diff --git a/arch/mips/include/asm/ioctl.h b/arch/mips/include/asm/ioctl.h index 916163401b2..c515a1a4c47 100644 --- a/arch/mips/include/asm/ioctl.h +++ b/arch/mips/include/asm/ioctl.h @@ -3,40 +3,16 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1995, 96, 99, 2001 Ralf Baechle + * Copyright (C) 1995, 96, 99, 2001 Ralf Baechle <ralf@linux-mips.org> + * Copyright (C) 2009 Wind River Systems + * Written by Ralf Baechle <ralf@linux-mips.org> */ -#ifndef _ASM_IOCTL_H -#define _ASM_IOCTL_H +#ifndef __ASM_IOCTL_H +#define __ASM_IOCTL_H -/* - * The original linux ioctl numbering scheme was just a general - * "anything goes" setup, where more or less random numbers were - * assigned. Sorry, I was clueless when I started out on this. 
- * - * On the alpha, we'll try to clean it up a bit, using a more sane - * ioctl numbering, and also trying to be compatible with OSF/1 in - * the process. I'd like to clean it up for the i386 as well, but - * it's so painful recognizing both the new and the old numbers.. - * - * The same applies for for the MIPS ABI; in fact even the macros - * from Linux/Alpha fit almost perfectly. - */ - -#define _IOC_NRBITS 8 -#define _IOC_TYPEBITS 8 #define _IOC_SIZEBITS 13 #define _IOC_DIRBITS 3 -#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) -#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) -#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) -#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) - -#define _IOC_NRSHIFT 0 -#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) -#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) -#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) - /* * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. * And this turns out useful to catch old ioctl numbers in header @@ -46,53 +22,6 @@ #define _IOC_READ 2U #define _IOC_WRITE 4U -/* - * The following are included for compatibility - */ -#define _IOC_VOID 0x20000000 -#define _IOC_OUT 0x40000000 -#define _IOC_IN 0x80000000 -#define _IOC_INOUT (IOC_IN|IOC_OUT) - -#define _IOC(dir, type, nr, size) \ - (((dir) << _IOC_DIRSHIFT) | \ - ((type) << _IOC_TYPESHIFT) | \ - ((nr) << _IOC_NRSHIFT) | \ - ((size) << _IOC_SIZESHIFT)) - -#ifdef __KERNEL__ -/* provoke compile error for invalid uses of size argument */ -extern unsigned int __invalid_size_argument_for_IOC; -#define _IOC_TYPECHECK(t) \ - ((sizeof(t) == sizeof(t[1]) && \ - sizeof(t) < (1 << _IOC_SIZEBITS)) ? \ - sizeof(t) : __invalid_size_argument_for_IOC) -#else -#define _IOC_TYPECHECK(t) (sizeof(t)) -#endif - -/* used to create numbers */ -#define _IO(type, nr) _IOC(_IOC_NONE, (type), (nr), 0) -#define _IOR(type, nr, size) _IOC(_IOC_READ, (type), (nr), (_IOC_TYPECHECK(size))) -#define _IOW(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), (_IOC_TYPECHECK(size))) -#define _IOWR(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, (type), (nr), (_IOC_TYPECHECK(size))) -#define _IOR_BAD(type, nr, size) _IOC(_IOC_READ, (type), (nr), sizeof(size)) -#define _IOW_BAD(type, nr, size) _IOC(_IOC_WRITE, (type), (nr), sizeof(size)) -#define _IOWR_BAD(type, nr, size) _IOC(_IOC_READ|_IOC_WRITE, (type), (nr), sizeof(size)) - - -/* used to decode them.. */ -#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) -#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) -#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) -#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) - -/* ...and for the drivers/sound files... 
*/ - -#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) -#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) -#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) -#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) -#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) +#include <asm-generic/ioctl.h> -#endif /* _ASM_IOCTL_H */ +#endif /* __ASM_IOCTL_H */ diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h index 806aae3c533..58e91ed0388 100644 --- a/arch/mips/include/asm/kmap_types.h +++ b/arch/mips/include/asm/kmap_types.h @@ -1,30 +1,12 @@ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H - #ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif diff --git a/arch/mips/include/asm/mach-au1x00/au1000_gpio.h b/arch/mips/include/asm/mach-au1x00/au1000_gpio.h deleted file mode 100644 index d8c96fda554..00000000000 --- a/arch/mips/include/asm/mach-au1x00/au1000_gpio.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * FILE NAME au1000_gpio.h - * - * BRIEF MODULE DESCRIPTION - * API to Alchemy Au1xx0 GPIO device. - * - * Author: MontaVista Software, Inc. <source@mvista.com> - * Steve Longerbeam - * - * Copyright 2001, 2008 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __AU1000_GPIO_H -#define __AU1000_GPIO_H - -#include <linux/ioctl.h> - -#define AU1000GPIO_IOC_MAGIC 'A' - -#define AU1000GPIO_IN _IOR(AU1000GPIO_IOC_MAGIC, 0, int) -#define AU1000GPIO_SET _IOW(AU1000GPIO_IOC_MAGIC, 1, int) -#define AU1000GPIO_CLEAR _IOW(AU1000GPIO_IOC_MAGIC, 2, int) -#define AU1000GPIO_OUT _IOW(AU1000GPIO_IOC_MAGIC, 3, int) -#define AU1000GPIO_TRISTATE _IOW(AU1000GPIO_IOC_MAGIC, 4, int) -#define AU1000GPIO_AVAIL_MASK _IOR(AU1000GPIO_IOC_MAGIC, 5, int) - -#ifdef __KERNEL__ -extern u32 get_au1000_avail_gpio_mask(void); -extern int au1000gpio_tristate(u32 data); -extern int au1000gpio_in(u32 *data); -extern int au1000gpio_set(u32 data); -extern int au1000gpio_clear(u32 data); -extern int au1000gpio_out(u32 data); -#endif - -#endif diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h new file mode 100644 index 00000000000..127d4ed9f07 --- /dev/null +++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h @@ -0,0 +1,604 @@ +/* + * GPIO functions for Au1000, Au1500, Au1100, Au1550, Au1200 + * + * Copyright (c) 2009 Manuel Lauss. + * + * Licensed under the terms outlined in the file COPYING. + */ + +#ifndef _ALCHEMY_GPIO_AU1000_H_ +#define _ALCHEMY_GPIO_AU1000_H_ + +#include <asm/mach-au1x00/au1000.h> + +/* The default GPIO numberspace as documented in the Alchemy manuals. + * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block. + */ +#define ALCHEMY_GPIO1_BASE 0 +#define ALCHEMY_GPIO2_BASE 200 + +#define ALCHEMY_GPIO1_NUM 32 +#define ALCHEMY_GPIO2_NUM 16 +#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1) +#define ALCHEMY_GPIO2_MAX (ALCHEMY_GPIO2_BASE + ALCHEMY_GPIO2_NUM - 1) + +#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off)) + + +static inline int au1000_gpio1_to_irq(int gpio) +{ + return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE); +} + +static inline int au1000_gpio2_to_irq(int gpio) +{ + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1000 +static inline int au1000_irq_to_gpio(int irq) +{ + if ((irq >= AU1000_GPIO_0) && (irq <= AU1000_GPIO_31)) + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + + return -ENXIO; +} +#endif + +static inline int au1500_gpio1_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO1_BASE; + + switch (gpio) { + case 0 ... 15: + case 20: + case 23 ... 28: return MAKE_IRQ(1, gpio); + } + + return -ENXIO; +} + +static inline int au1500_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + switch (gpio) { + case 0 ... 3: return MAKE_IRQ(1, 16 + gpio - 0); + case 4 ... 5: return MAKE_IRQ(1, 21 + gpio - 4); + case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6); + } + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1500 +static inline int au1500_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_15: + case AU1500_GPIO_20: + case AU1500_GPIO_23 ... AU1500_GPIO_28: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1500_GPIO_200 ... AU1500_GPIO_203: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_200) + 0; + case AU1500_GPIO_204 ... AU1500_GPIO_205: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_204) + 4; + case AU1500_GPIO_206 ... 
AU1500_GPIO_207: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_206) + 6; + case AU1500_GPIO_208_215: + return ALCHEMY_GPIO2_BASE + 8; + } + + return -ENXIO; +} +#endif + +static inline int au1100_gpio1_to_irq(int gpio) +{ + return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE); +} + +static inline int au1100_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + if ((gpio >= 8) && (gpio <= 15)) + return MAKE_IRQ(0, 29); /* shared GPIO208_215 */ + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1100 +static inline int au1100_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_31: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1100_GPIO_208_215: + return ALCHEMY_GPIO2_BASE + 8; + } + + return -ENXIO; +} +#endif + +static inline int au1550_gpio1_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO1_BASE; + + switch (gpio) { + case 0 ... 15: + case 20 ... 28: return MAKE_IRQ(1, gpio); + case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16); + } + + return -ENXIO; +} + +static inline int au1550_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + switch (gpio) { + case 0: return MAKE_IRQ(1, 16); + case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */ + case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6); + case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */ + } + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1550 +static inline int au1550_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_15: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1550_GPIO_200: + case AU1500_GPIO_201_205: + return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO_200) + 0; + case AU1500_GPIO_16 ... AU1500_GPIO_28: + return ALCHEMY_GPIO1_BASE + (irq - AU1500_GPIO_16) + 16; + case AU1500_GPIO_206 ... AU1500_GPIO_208_218: + return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_206) + 6; + } + + return -ENXIO; +} +#endif + +static inline int au1200_gpio1_to_irq(int gpio) +{ + return MAKE_IRQ(1, gpio - ALCHEMY_GPIO1_BASE); +} + +static inline int au1200_gpio2_to_irq(int gpio) +{ + gpio -= ALCHEMY_GPIO2_BASE; + + switch (gpio) { + case 0 ... 2: return MAKE_IRQ(0, 5 + gpio - 0); + case 3: return MAKE_IRQ(0, 22); + case 4 ... 7: return MAKE_IRQ(0, 24 + gpio - 4); + case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */ + } + + return -ENXIO; +} + +#ifdef CONFIG_SOC_AU1200 +static inline int au1200_irq_to_gpio(int irq) +{ + switch (irq) { + case AU1000_GPIO_0 ... AU1000_GPIO_31: + return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0; + case AU1200_GPIO_200 ... AU1200_GPIO_202: + return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO_200) + 0; + case AU1200_GPIO_203: + return ALCHEMY_GPIO2_BASE + 3; + case AU1200_GPIO_204 ... AU1200_GPIO_208_215: + return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO_204) + 4; + } + + return -ENXIO; +} +#endif + +/* + * GPIO1 block macros for common linux gpio functions. + */ +static inline void alchemy_gpio1_set_value(int gpio, int v) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); + unsigned long r = v ? 
SYS_OUTPUTSET : SYS_OUTPUTCLR; + au_writel(mask, r); + au_sync(); +} + +static inline int alchemy_gpio1_get_value(int gpio) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); + return au_readl(SYS_PINSTATERD) & mask; +} + +static inline int alchemy_gpio1_direction_input(int gpio) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO1_BASE); + au_writel(mask, SYS_TRIOUTCLR); + au_sync(); + return 0; +} + +static inline int alchemy_gpio1_direction_output(int gpio, int v) +{ + /* hardware switches to "output" mode when one of the two + * "set_value" registers is accessed. + */ + alchemy_gpio1_set_value(gpio, v); + return 0; +} + +static inline int alchemy_gpio1_is_valid(int gpio) +{ + return ((gpio >= ALCHEMY_GPIO1_BASE) && (gpio <= ALCHEMY_GPIO1_MAX)); +} + +static inline int alchemy_gpio1_to_irq(int gpio) +{ +#if defined(CONFIG_SOC_AU1000) + return au1000_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1100) + return au1100_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1500) + return au1500_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1550) + return au1550_gpio1_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1200) + return au1200_gpio1_to_irq(gpio); +#else + return -ENXIO; +#endif +} + +/* + * GPIO2 block macros for common linux GPIO functions. The 'gpio' + * parameter must be in range of ALCHEMY_GPIO2_BASE..ALCHEMY_GPIO2_MAX. + */ +static inline void __alchemy_gpio2_mod_dir(int gpio, int to_out) +{ + unsigned long mask = 1 << (gpio - ALCHEMY_GPIO2_BASE); + unsigned long d = au_readl(GPIO2_DIR); + if (to_out) + d |= mask; + else + d &= ~mask; + au_writel(d, GPIO2_DIR); + au_sync(); +} + +static inline void alchemy_gpio2_set_value(int gpio, int v) +{ + unsigned long mask; + mask = ((v) ? 0x00010001 : 0x00010000) << (gpio - ALCHEMY_GPIO2_BASE); + au_writel(mask, GPIO2_OUTPUT); + au_sync(); +} + +static inline int alchemy_gpio2_get_value(int gpio) +{ + return au_readl(GPIO2_PINSTATE) & (1 << (gpio - ALCHEMY_GPIO2_BASE)); +} + +static inline int alchemy_gpio2_direction_input(int gpio) +{ + unsigned long flags; + local_irq_save(flags); + __alchemy_gpio2_mod_dir(gpio, 0); + local_irq_restore(flags); + return 0; +} + +static inline int alchemy_gpio2_direction_output(int gpio, int v) +{ + unsigned long flags; + alchemy_gpio2_set_value(gpio, v); + local_irq_save(flags); + __alchemy_gpio2_mod_dir(gpio, 1); + local_irq_restore(flags); + return 0; +} + +static inline int alchemy_gpio2_is_valid(int gpio) +{ + return ((gpio >= ALCHEMY_GPIO2_BASE) && (gpio <= ALCHEMY_GPIO2_MAX)); +} + +static inline int alchemy_gpio2_to_irq(int gpio) +{ +#if defined(CONFIG_SOC_AU1000) + return au1000_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1100) + return au1100_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1500) + return au1500_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1550) + return au1550_gpio2_to_irq(gpio); +#elif defined(CONFIG_SOC_AU1200) + return au1200_gpio2_to_irq(gpio); +#else + return -ENXIO; +#endif +} + +/**********************************************************************/ + +/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before + * SYS_PININPUTEN is written to at least once. On Au1550/Au1200 this + * register enables use of GPIOs as wake source. 
+ */ +static inline void alchemy_gpio1_input_enable(void) +{ + au_writel(0, SYS_PININPUTEN); /* the write op is key */ + au_sync(); +} + +/* GPIO2 shared interrupts and control */ + +static inline void __alchemy_gpio2_mod_int(int gpio2, int en) +{ + unsigned long r = au_readl(GPIO2_INTENABLE); + if (en) + r |= 1 << gpio2; + else + r &= ~(1 << gpio2); + au_writel(r, GPIO2_INTENABLE); + au_sync(); +} + +/** + * alchemy_gpio2_enable_int - Enable a GPIO2 pin's shared irq contribution. + * @gpio2: The GPIO2 pin to activate (200...215). + * + * GPIO208-215 have one shared interrupt line to the INTC. They are + * and'ed with a per-pin enable bit and finally or'ed together to form + * a single irq request (useful for active-high sources). + * With this function, a pin's individual contribution to the int request + * can be enabled. As with all other GPIO-based interrupts, the INTC + * must be programmed to accept the GPIO208_215 interrupt as well. + * + * NOTE: Calling this macro is only necessary for GPIO208-215; all other + * GPIO2-based interrupts have their own request to the INTC. Please + * consult your Alchemy databook for more information! + * + * NOTE: On the Au1550, GPIOs 201-205 also have a shared interrupt request + * line to the INTC, GPIO201_205. This function can be used for those + * as well. + * + * NOTE: 'gpio2' parameter must be in range of the GPIO2 numberspace + * (200-215 by default). No sanity checks are made. + */ +static inline void alchemy_gpio2_enable_int(int gpio2) +{ + unsigned long flags; + + gpio2 -= ALCHEMY_GPIO2_BASE; + +#if defined(CONFIG_SOC_AU1100) || defined(CONFIG_SOC_AU1500) + /* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */ + gpio2 -= 8; +#endif + local_irq_save(flags); + __alchemy_gpio2_mod_int(gpio2, 1); + local_irq_restore(flags); +} + +/** + * alchemy_gpio2_disable_int - Disable a GPIO2 pin's shared irq contribution. + * @gpio2: The GPIO2 pin to deactivate (200...215). + * + * see function alchemy_gpio2_enable_int() for more information. + */ +static inline void alchemy_gpio2_disable_int(int gpio2) +{ + unsigned long flags; + + gpio2 -= ALCHEMY_GPIO2_BASE; + +#if defined(CONFIG_SOC_AU1100) || defined(CONFIG_SOC_AU1500) + /* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */ + gpio2 -= 8; +#endif + local_irq_save(flags); + __alchemy_gpio2_mod_int(gpio2, 0); + local_irq_restore(flags); +} + +/** + * alchemy_gpio2_enable - Activate GPIO2 block. + * + * The GPIO2 block must be enabled explicitly to work. On systems + * where this isn't done by the bootloader, this macro can be used. + */ +static inline void alchemy_gpio2_enable(void) +{ + au_writel(3, GPIO2_ENABLE); /* reset, clock enabled */ + au_sync(); + au_writel(1, GPIO2_ENABLE); /* clock enabled */ + au_sync(); +} + +/** + * alchemy_gpio2_disable - disable GPIO2 block. + * + * Disable and put GPIO2 block in low-power mode. + */ +static inline void alchemy_gpio2_disable(void) +{ + au_writel(2, GPIO2_ENABLE); /* reset, clock disabled */ + au_sync(); +} + +/**********************************************************************/ + +/* wrappers for on-chip gpios; can be used before gpio chips have been + * registered with gpiolib. + */ +static inline int alchemy_gpio_direction_input(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_direction_input(gpio) : + alchemy_gpio1_direction_input(gpio); +} + +static inline int alchemy_gpio_direction_output(int gpio, int v) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? 
+ alchemy_gpio2_direction_output(gpio, v) : + alchemy_gpio1_direction_output(gpio, v); +} + +static inline int alchemy_gpio_get_value(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_get_value(gpio) : + alchemy_gpio1_get_value(gpio); +} + +static inline void alchemy_gpio_set_value(int gpio, int v) +{ + if (gpio >= ALCHEMY_GPIO2_BASE) + alchemy_gpio2_set_value(gpio, v); + else + alchemy_gpio1_set_value(gpio, v); +} + +static inline int alchemy_gpio_is_valid(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_is_valid(gpio) : + alchemy_gpio1_is_valid(gpio); +} + +static inline int alchemy_gpio_cansleep(int gpio) +{ + return 0; /* Alchemy never gets tired */ +} + +static inline int alchemy_gpio_to_irq(int gpio) +{ + return (gpio >= ALCHEMY_GPIO2_BASE) ? + alchemy_gpio2_to_irq(gpio) : + alchemy_gpio1_to_irq(gpio); +} + +static inline int alchemy_irq_to_gpio(int irq) +{ +#if defined(CONFIG_SOC_AU1000) + return au1000_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1100) + return au1100_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1500) + return au1500_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1550) + return au1550_irq_to_gpio(irq); +#elif defined(CONFIG_SOC_AU1200) + return au1200_irq_to_gpio(irq); +#else + return -ENXIO; +#endif +} + +/**********************************************************************/ + +/* Linux gpio framework integration. + * + * 4 use cases of Au1000-Au1200 GPIOs: + *(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y: + * Board must register gpiochips. + *(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n: + * 2 (1 for Au1000) gpio_chips are registered. + * + *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y: + * the boards' gpio.h must provide the linux gpio wrapper functions. + * + *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n: + * inlinable gpio functions are provided which enable access to the + * Au1000 gpios only by using the numbers straight out of the data- + * sheets. + * + * Cases 1 and 3 are intended for boards which want to provide their own + * GPIO namespace and operations (e.g. you have 8 GPIOs + * which are in part provided by spare Au1000 GPIO pins and in part by + * an external FPGA, but you still want them to be accessible in linux + * as gpio0-7. The board can of course use the alchemy_gpioX_* functions + * as required). 
+ */ + +#ifndef CONFIG_GPIOLIB + + +#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (4) */ + +static inline int gpio_direction_input(int gpio) +{ + return alchemy_gpio_direction_input(gpio); +} + +static inline int gpio_direction_output(int gpio, int v) +{ + return alchemy_gpio_direction_output(gpio, v); +} + +static inline int gpio_get_value(int gpio) +{ + return alchemy_gpio_get_value(gpio); +} + +static inline void gpio_set_value(int gpio, int v) +{ + alchemy_gpio_set_value(gpio, v); +} + +static inline int gpio_is_valid(int gpio) +{ + return alchemy_gpio_is_valid(gpio); +} + +static inline int gpio_cansleep(int gpio) +{ + return alchemy_gpio_cansleep(gpio); +} + +static inline int gpio_to_irq(int gpio) +{ + return alchemy_gpio_to_irq(gpio); +} + +static inline int irq_to_gpio(int irq) +{ + return alchemy_irq_to_gpio(irq); +} + +#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ + + +#else /* CONFIG GPIOLIB */ + + + /* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */ +#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (2) */ + +/* get everything through gpiolib */ +#define gpio_to_irq __gpio_to_irq +#define gpio_get_value __gpio_get_value +#define gpio_set_value __gpio_set_value +#define gpio_cansleep __gpio_cansleep +#define irq_to_gpio alchemy_irq_to_gpio + +#include <asm-generic/gpio.h> + +#endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ + + +#endif /* !CONFIG_GPIOLIB */ + +#endif /* _ALCHEMY_GPIO_AU1000_H_ */ diff --git a/arch/mips/include/asm/mach-au1x00/gpio.h b/arch/mips/include/asm/mach-au1x00/gpio.h index 34d9b727902..f9b7d41c659 100644 --- a/arch/mips/include/asm/mach-au1x00/gpio.h +++ b/arch/mips/include/asm/mach-au1x00/gpio.h @@ -1,33 +1,10 @@ -#ifndef _AU1XXX_GPIO_H_ -#define _AU1XXX_GPIO_H_ +#ifndef _ALCHEMY_GPIO_H_ +#define _ALCHEMY_GPIO_H_ -#include <linux/types.h> +#if defined(CONFIG_ALCHEMY_GPIO_AU1000) -#define AU1XXX_GPIO_BASE 200 +#include <asm/mach-au1x00/gpio-au1000.h> -/* GPIO bank 1 offsets */ -#define AU1000_GPIO1_TRI_OUT 0x0100 -#define AU1000_GPIO1_OUT 0x0108 -#define AU1000_GPIO1_ST 0x0110 -#define AU1000_GPIO1_CLR 0x010C +#endif -/* GPIO bank 2 offsets */ -#define AU1000_GPIO2_DIR 0x00 -#define AU1000_GPIO2_RSVD 0x04 -#define AU1000_GPIO2_OUT 0x08 -#define AU1000_GPIO2_ST 0x0C -#define AU1000_GPIO2_INT 0x10 -#define AU1000_GPIO2_EN 0x14 - -#define GPIO2_OUT_EN_MASK 0x00010000 - -#define gpio_to_irq(gpio) NULL - -#define gpio_get_value __gpio_get_value -#define gpio_set_value __gpio_set_value - -#define gpio_cansleep __gpio_cansleep - -#include <asm-generic/gpio.h> - -#endif /* _AU1XXX_GPIO_H_ */ +#endif /* _ALCHEMY_GPIO_H_ */ diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h index 1784fde2e28..98504142124 100644 --- a/arch/mips/include/asm/mach-bcm47xx/gpio.h +++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h @@ -37,6 +37,9 @@ static inline int gpio_direction_input(unsigned gpio) static inline int gpio_direction_output(unsigned gpio, int value) { + /* first set the gpio out value */ + ssb_gpio_out(&ssb_bcm47xx, 1 << gpio, value ? 
1 << gpio : 0); + /* then set the gpio mode */ ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio); return 0; } diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index 04ce6e6569d..3d830756b13 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -47,11 +47,13 @@ #define cpu_has_mips32r2 0 #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 1 +#define cpu_has_mips_r2_exec_hazard 0 #define cpu_has_dsp 0 #define cpu_has_mipsmt 0 #define cpu_has_userlocal 0 #define cpu_has_vint 0 #define cpu_has_veic 0 +#define cpu_hwrena_impl_bits 0xc0000000 #define ARCH_HAS_READ_CURRENT_TIMER 1 #define ARCH_HAS_IRQ_PER_CPU 1 #define ARCH_HAS_SPINLOCK_PREFETCH 1 diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index f30fce92aab..17d579471ec 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -30,12 +30,14 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE); } -static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static inline unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { octeon_unmap_dma_mem(dev, dma_addr); } diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h index 36c611b6c59..8da98073e95 100644 --- a/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -23,12 +23,14 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, return page_to_phys(page); } -static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static inline unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h index 4c21bfca10c..d3d04018a85 100644 --- a/arch/mips/include/asm/mach-ip27/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h @@ -33,12 +33,14 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) return pa; } -static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr & ~(0xffUL << 56); } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h index 7ae40f4b1c8..37855955b31 100644 --- a/arch/mips/include/asm/mach-ip32/dma-coherence.h +++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h @@ -50,7 
+50,8 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) } /* This is almost certainly wrong but it's what dma-ip32.c used to use */ -static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { unsigned long addr = dma_addr & RAM_OFFSET_MASK; @@ -60,7 +61,8 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) return addr; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h index 1c7cd27efa7..f93aee59454 100644 --- a/arch/mips/include/asm/mach-jazz/dma-coherence.h +++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h @@ -22,12 +22,14 @@ static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page) return vdma_alloc(page_to_phys(page), PAGE_SIZE); } -static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return vdma_log2phys(dma_addr); } -static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { vdma_free(dma_addr); } diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h index 38fad7dfe7d..c8de5e75077 100644 --- a/arch/mips/include/asm/mach-lemote/dma-coherence.h +++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h @@ -25,12 +25,14 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, return page_to_phys(page) | 0x80000000; } -static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr) +static inline unsigned long plat_dma_addr_to_phys(struct device *dev, + dma_addr_t dma_addr) { return dma_addr & 0x7fffffff; } -static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr) +static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction direction) { } diff --git a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h index f3bc7efa260..c3e4d3a4c95 100644 --- a/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-rc32434/cpu-feature-overrides.h @@ -53,11 +53,6 @@ #define cpu_has_smartmips 0 #define cpu_has_vtag_icache 0 -/* #define cpu_has_dc_aliases ? */ -/* #define cpu_has_ic_fills_f_dc ? */ -/* #define cpu_has_pindexed_dcache ? */ - -/* #define cpu_icache_snoops_remote_store ? */ #define cpu_has_mips32r1 1 #define cpu_has_mips32r2 0 diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 32ef8bec5c8..a581d60cbcc 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -220,6 +220,22 @@ #error Bad page size configuration! 
#endif +/* + * Default huge tlb size for a given kernel configuration + */ +#ifdef CONFIG_PAGE_SIZE_4KB +#define PM_HUGE_MASK PM_1M +#elif defined(CONFIG_PAGE_SIZE_8KB) +#define PM_HUGE_MASK PM_4M +#elif defined(CONFIG_PAGE_SIZE_16KB) +#define PM_HUGE_MASK PM_16M +#elif defined(CONFIG_PAGE_SIZE_32KB) +#define PM_HUGE_MASK PM_64M +#elif defined(CONFIG_PAGE_SIZE_64KB) +#define PM_HUGE_MASK PM_256M +#elif defined(CONFIG_HUGETLB_PAGE) +#error Bad page size configuration for hugetlbfs! +#endif /* * Values used for computation of new tlb entries diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index 692989acd8a..f3c23a43f84 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -157,6 +157,13 @@ enum cvmx_board_types_enum { CVMX_BOARD_TYPE_NIC_XLE_4G = 21, CVMX_BOARD_TYPE_EBT5600 = 22, CVMX_BOARD_TYPE_EBH5201 = 23, + CVMX_BOARD_TYPE_EBT5200 = 24, + CVMX_BOARD_TYPE_CB5600 = 25, + CVMX_BOARD_TYPE_CB5601 = 26, + CVMX_BOARD_TYPE_CB5200 = 27, + /* Special 'generic' board type, supports many boards */ + CVMX_BOARD_TYPE_GENERIC = 28, + CVMX_BOARD_TYPE_EBH5610 = 29, CVMX_BOARD_TYPE_MAX, /* @@ -228,6 +235,12 @@ static inline const char *cvmx_board_type_to_string(enum ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC) + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610) ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX) /* Customer boards listed here */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h index 1cbe4b55889..8e708bdb43f 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootmem.h +++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h @@ -183,6 +183,64 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, * Returns 0 on failure, * !0 on success */ + + +/** + * Allocate a block of memory from the free list that was passed + * to the application by the bootloader, and assign it a name in the + * global named block table. (part of the cvmx_bootmem_descriptor_t structure) + * Named blocks can later be freed. + * + * @size: Size in bytes of block to allocate + * @alignment: Alignment required - must be power of 2 + * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * + * Returns a pointer to block of memory, NULL on error + */ +extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, + char *name); + + + +/** + * Allocate a block of memory from the free list that was passed + * to the application by the bootloader, and assign it a name in the + * global named block table. (part of the cvmx_bootmem_descriptor_t structure) + * Named blocks can later be freed. + * + * @size: Size in bytes of block to allocate + * @address: Physical address to allocate memory at. If this + * memory is not available, the allocation fails. 
+ * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN + * bytes + * + * Returns a pointer to block of memory, NULL on error + */ +extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, + char *name); + + + +/** + * Allocate a block of memory from a specific range of the free list + * that was passed to the application by the bootloader, and assign it + * a name in the global named block table. (part of the + * cvmx_bootmem_descriptor_t structure) Named blocks can later be + * freed. If request cannot be satisfied within the address range + * specified, NULL is returned + * + * @size: Size in bytes of block to allocate + * @min_addr: minimum address of range + * @max_addr: maximum address of range + * @align: Alignment of memory to be allocated. (must be a power of 2) + * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * + * Returns a pointer to block of memory, NULL on error + */ +extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, + uint64_t max_addr, uint64_t align, + char *name); + extern int cvmx_bootmem_free_named(char *name); /** @@ -224,6 +282,33 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint32_t flags); /** + * Allocates a named block of physical memory from the free list, at + * (optional) requested address and alignment. + * + * @param size size of region to allocate. All requests are rounded + * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE + * bytes size + * @param min_addr Minimum address that block can occupy. + * @param max_addr Specifies the maximum address_min (inclusive) that + * the allocation can use. + * @param alignment Requested alignment of the block. If this + * alignment cannot be met, the allocation fails. + * This must be a power of 2. (Note: Alignment of + * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and + * internally enforced. Requested alignments of less + * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to + * CVMX_BOOTMEM_ALIGNMENT_SIZE.) + * @param name name to assign to named block + * @param flags Flags to control options for the allocation. + * + * @return physical address of block allocated, or -1 on failure + */ +int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, + uint64_t max_addr, + uint64_t alignment, + char *name, uint32_t flags); + +/** * Finds a named memory block by name. * Also used for finding an unused entry in the named block table. * diff --git a/arch/mips/include/asm/octeon/cvmx-helper-errata.h b/arch/mips/include/asm/octeon/cvmx-helper-errata.h new file mode 100644 index 00000000000..5fc99189ff5 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-helper-errata.h @@ -0,0 +1,33 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_HELPER_ERRATA_H__ +#define __CVMX_HELPER_ERRATA_H__ + +extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm); + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-helper-jtag.h b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h new file mode 100644 index 00000000000..29f016ddb89 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-helper-jtag.h @@ -0,0 +1,43 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * @file + * + * Helper utilities for qlm_jtag. + * + */ + +#ifndef __CVMX_HELPER_JTAG_H__ +#define __CVMX_HELPER_JTAG_H__ + +extern void cvmx_helper_qlm_jtag_init(void); +extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data); +extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits); +extern void cvmx_helper_qlm_jtag_update(int qlm); + +#endif /* __CVMX_HELPER_JTAG_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h new file mode 100644 index 00000000000..4b347bb8ce8 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h @@ -0,0 +1,2560 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_NPEI_DEFS_H__ +#define __CVMX_NPEI_DEFS_H__ + +#define CVMX_NPEI_BAR1_INDEXX(offset) \ + (0x0000000000000000ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_BIST_STATUS \ + (0x0000000000000580ull) +#define CVMX_NPEI_BIST_STATUS2 \ + (0x0000000000000680ull) +#define CVMX_NPEI_CTL_PORT0 \ + (0x0000000000000250ull) +#define CVMX_NPEI_CTL_PORT1 \ + (0x0000000000000260ull) +#define CVMX_NPEI_CTL_STATUS \ + (0x0000000000000570ull) +#define CVMX_NPEI_CTL_STATUS2 \ + (0x0000000000003C00ull) +#define CVMX_NPEI_DATA_OUT_CNT \ + (0x00000000000005F0ull) +#define CVMX_NPEI_DBG_DATA \ + (0x0000000000000510ull) +#define CVMX_NPEI_DBG_SELECT \ + (0x0000000000000500ull) +#define CVMX_NPEI_DMA0_INT_LEVEL \ + (0x00000000000005C0ull) +#define CVMX_NPEI_DMA1_INT_LEVEL \ + (0x00000000000005D0ull) +#define CVMX_NPEI_DMAX_COUNTS(offset) \ + (0x0000000000000450ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMAX_DBELL(offset) \ + (0x00000000000003B0ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMAX_IBUFF_SADDR(offset) \ + (0x0000000000000400ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMAX_NADDR(offset) \ + (0x00000000000004A0ull + (((offset) & 7) * 16)) +#define CVMX_NPEI_DMA_CNTS \ + (0x00000000000005E0ull) +#define CVMX_NPEI_DMA_CONTROL \ + (0x00000000000003A0ull) +#define CVMX_NPEI_INT_A_ENB \ + (0x0000000000000560ull) +#define CVMX_NPEI_INT_A_ENB2 \ + (0x0000000000003CE0ull) +#define CVMX_NPEI_INT_A_SUM \ + (0x0000000000000550ull) +#define CVMX_NPEI_INT_ENB \ + (0x0000000000000540ull) +#define CVMX_NPEI_INT_ENB2 \ + (0x0000000000003CD0ull) +#define CVMX_NPEI_INT_INFO \ + (0x0000000000000590ull) +#define CVMX_NPEI_INT_SUM \ + (0x0000000000000530ull) +#define CVMX_NPEI_INT_SUM2 \ + (0x0000000000003CC0ull) +#define CVMX_NPEI_LAST_WIN_RDATA0 \ + (0x0000000000000600ull) +#define CVMX_NPEI_LAST_WIN_RDATA1 \ + (0x0000000000000610ull) +#define CVMX_NPEI_MEM_ACCESS_CTL \ + (0x00000000000004F0ull) +#define CVMX_NPEI_MEM_ACCESS_SUBIDX(offset) \ + (0x0000000000000340ull + (((offset) & 31) * 16) - 16 * 12) +#define CVMX_NPEI_MSI_ENB0 \ + (0x0000000000003C50ull) +#define CVMX_NPEI_MSI_ENB1 \ + (0x0000000000003C60ull) +#define CVMX_NPEI_MSI_ENB2 \ + (0x0000000000003C70ull) +#define CVMX_NPEI_MSI_ENB3 \ + (0x0000000000003C80ull) +#define CVMX_NPEI_MSI_RCV0 \ + (0x0000000000003C10ull) +#define CVMX_NPEI_MSI_RCV1 \ + (0x0000000000003C20ull) +#define CVMX_NPEI_MSI_RCV2 \ + (0x0000000000003C30ull) +#define CVMX_NPEI_MSI_RCV3 \ + (0x0000000000003C40ull) +#define CVMX_NPEI_MSI_RD_MAP \ + (0x0000000000003CA0ull) +#define CVMX_NPEI_MSI_W1C_ENB0 \ + (0x0000000000003CF0ull) +#define CVMX_NPEI_MSI_W1C_ENB1 \ + (0x0000000000003D00ull) +#define CVMX_NPEI_MSI_W1C_ENB2 \ + (0x0000000000003D10ull) +#define CVMX_NPEI_MSI_W1C_ENB3 \ + (0x0000000000003D20ull) +#define CVMX_NPEI_MSI_W1S_ENB0 \ + (0x0000000000003D30ull) +#define CVMX_NPEI_MSI_W1S_ENB1 \ + (0x0000000000003D40ull) +#define CVMX_NPEI_MSI_W1S_ENB2 \ + (0x0000000000003D50ull) +#define CVMX_NPEI_MSI_W1S_ENB3 \ + (0x0000000000003D60ull) +#define CVMX_NPEI_MSI_WR_MAP \ + (0x0000000000003C90ull) +#define 
CVMX_NPEI_PCIE_CREDIT_CNT \ + (0x0000000000003D70ull) +#define CVMX_NPEI_PCIE_MSI_RCV \ + (0x0000000000003CB0ull) +#define CVMX_NPEI_PCIE_MSI_RCV_B1 \ + (0x0000000000000650ull) +#define CVMX_NPEI_PCIE_MSI_RCV_B2 \ + (0x0000000000000660ull) +#define CVMX_NPEI_PCIE_MSI_RCV_B3 \ + (0x0000000000000670ull) +#define CVMX_NPEI_PKTX_CNTS(offset) \ + (0x0000000000002400ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_BADDR(offset) \ + (0x0000000000002800ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \ + (0x0000000000002C00ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \ + (0x0000000000003000ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_INSTR_HEADER(offset) \ + (0x0000000000003400ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_IN_BP(offset) \ + (0x0000000000003800ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_SLIST_BADDR(offset) \ + (0x0000000000001400ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \ + (0x0000000000001800ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \ + (0x0000000000001C00ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKT_CNT_INT \ + (0x0000000000001110ull) +#define CVMX_NPEI_PKT_CNT_INT_ENB \ + (0x0000000000001130ull) +#define CVMX_NPEI_PKT_DATA_OUT_ES \ + (0x00000000000010B0ull) +#define CVMX_NPEI_PKT_DATA_OUT_NS \ + (0x00000000000010A0ull) +#define CVMX_NPEI_PKT_DATA_OUT_ROR \ + (0x0000000000001090ull) +#define CVMX_NPEI_PKT_DPADDR \ + (0x0000000000001080ull) +#define CVMX_NPEI_PKT_INPUT_CONTROL \ + (0x0000000000001150ull) +#define CVMX_NPEI_PKT_INSTR_ENB \ + (0x0000000000001000ull) +#define CVMX_NPEI_PKT_INSTR_RD_SIZE \ + (0x0000000000001190ull) +#define CVMX_NPEI_PKT_INSTR_SIZE \ + (0x0000000000001020ull) +#define CVMX_NPEI_PKT_INT_LEVELS \ + (0x0000000000001100ull) +#define CVMX_NPEI_PKT_IN_BP \ + (0x00000000000006B0ull) +#define CVMX_NPEI_PKT_IN_DONEX_CNTS(offset) \ + (0x0000000000002000ull + (((offset) & 31) * 16)) +#define CVMX_NPEI_PKT_IN_INSTR_COUNTS \ + (0x00000000000006A0ull) +#define CVMX_NPEI_PKT_IN_PCIE_PORT \ + (0x00000000000011A0ull) +#define CVMX_NPEI_PKT_IPTR \ + (0x0000000000001070ull) +#define CVMX_NPEI_PKT_OUTPUT_WMARK \ + (0x0000000000001160ull) +#define CVMX_NPEI_PKT_OUT_BMODE \ + (0x00000000000010D0ull) +#define CVMX_NPEI_PKT_OUT_ENB \ + (0x0000000000001010ull) +#define CVMX_NPEI_PKT_PCIE_PORT \ + (0x00000000000010E0ull) +#define CVMX_NPEI_PKT_PORT_IN_RST \ + (0x0000000000000690ull) +#define CVMX_NPEI_PKT_SLIST_ES \ + (0x0000000000001050ull) +#define CVMX_NPEI_PKT_SLIST_ID_SIZE \ + (0x0000000000001180ull) +#define CVMX_NPEI_PKT_SLIST_NS \ + (0x0000000000001040ull) +#define CVMX_NPEI_PKT_SLIST_ROR \ + (0x0000000000001030ull) +#define CVMX_NPEI_PKT_TIME_INT \ + (0x0000000000001120ull) +#define CVMX_NPEI_PKT_TIME_INT_ENB \ + (0x0000000000001140ull) +#define CVMX_NPEI_RSL_INT_BLOCKS \ + (0x0000000000000520ull) +#define CVMX_NPEI_SCRATCH_1 \ + (0x0000000000000270ull) +#define CVMX_NPEI_STATE1 \ + (0x0000000000000620ull) +#define CVMX_NPEI_STATE2 \ + (0x0000000000000630ull) +#define CVMX_NPEI_STATE3 \ + (0x0000000000000640ull) +#define CVMX_NPEI_WINDOW_CTL \ + (0x0000000000000380ull) +#define CVMX_NPEI_WIN_RD_ADDR \ + (0x0000000000000210ull) +#define CVMX_NPEI_WIN_RD_DATA \ + (0x0000000000000240ull) +#define CVMX_NPEI_WIN_WR_ADDR \ + (0x0000000000000200ull) +#define CVMX_NPEI_WIN_WR_DATA \ + (0x0000000000000220ull) +#define CVMX_NPEI_WIN_WR_MASK \ + (0x0000000000000230ull) + +union 
cvmx_npei_bar1_indexx { + uint32_t u32; + struct cvmx_npei_bar1_indexx_s { + uint32_t reserved_18_31:14; + uint32_t addr_idx:14; + uint32_t ca:1; + uint32_t end_swp:2; + uint32_t addr_v:1; + } s; + struct cvmx_npei_bar1_indexx_s cn52xx; + struct cvmx_npei_bar1_indexx_s cn52xxp1; + struct cvmx_npei_bar1_indexx_s cn56xx; + struct cvmx_npei_bar1_indexx_s cn56xxp1; +}; + +union cvmx_npei_bist_status { + uint64_t u64; + struct cvmx_npei_bist_status_s { + uint64_t pkt_rdf:1; + uint64_t pkt_pmem:1; + uint64_t pkt_p1:1; + uint64_t reserved_60_60:1; + uint64_t pcr_gim:1; + uint64_t pkt_pif:1; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t reserved_50_52:3; + uint64_t pkt_ind:1; + uint64_t pkt_slm:1; + uint64_t reserved_36_47:12; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t reserved_31_31:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t reserved_2_2:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } s; + struct cvmx_npei_bist_status_cn52xx { + uint64_t pkt_rdf:1; + uint64_t pkt_pmem:1; + uint64_t pkt_p1:1; + uint64_t reserved_60_60:1; + uint64_t pcr_gim:1; + uint64_t pkt_pif:1; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t pkt_imem:1; + uint64_t pkt_pfm:1; + uint64_t pkt_pof:1; + uint64_t reserved_48_49:2; + uint64_t pkt_pop0:1; + uint64_t pkt_pop1:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t d4_mem:1; + uint64_t ds_mem:1; + uint64_t reserved_36_39:4; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t d4_pst:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dif4:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn52xx; + struct cvmx_npei_bist_status_cn52xxp1 { + uint64_t reserved_46_63:18; + uint64_t d0_mem0:1; + uint64_t d1_mem1:1; + uint64_t d2_mem2:1; + uint64_t d3_mem3:1; + uint64_t dr0_mem:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t dr1_mem:1; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t dr2_mem:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + 
uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dr3_mem:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn52xxp1; + struct cvmx_npei_bist_status_cn56xx { + uint64_t pkt_rdf:1; + uint64_t reserved_60_62:3; + uint64_t pcr_gim:1; + uint64_t pkt_pif:1; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t pkt_imem:1; + uint64_t pkt_pfm:1; + uint64_t pkt_pof:1; + uint64_t reserved_48_49:2; + uint64_t pkt_pop0:1; + uint64_t pkt_pop1:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t d4_mem:1; + uint64_t ds_mem:1; + uint64_t reserved_36_39:4; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t d4_pst:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dif4:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn56xx; + struct cvmx_npei_bist_status_cn56xxp1 { + uint64_t reserved_58_63:6; + uint64_t pcsr_int:1; + uint64_t pcsr_im:1; + uint64_t pcsr_cnt:1; + uint64_t pcsr_id:1; + uint64_t pcsr_sl:1; + uint64_t pkt_pout:1; + uint64_t pkt_imem:1; + uint64_t pkt_cntm:1; + uint64_t pkt_ind:1; + uint64_t pkt_slm:1; + uint64_t pkt_odf:1; + uint64_t pkt_oif:1; + uint64_t pkt_out:1; + uint64_t pkt_i0:1; + uint64_t pkt_i1:1; + uint64_t pkt_s0:1; + uint64_t pkt_s1:1; + uint64_t d0_mem:1; + uint64_t d1_mem:1; + uint64_t d2_mem:1; + uint64_t d3_mem:1; + uint64_t d4_mem:1; + uint64_t d0_pst:1; + uint64_t d1_pst:1; + uint64_t d2_pst:1; + uint64_t d3_pst:1; + uint64_t d4_pst:1; + uint64_t n2p0_c:1; + uint64_t n2p0_o:1; + uint64_t n2p1_c:1; + uint64_t n2p1_o:1; + uint64_t cpl_p0:1; + uint64_t cpl_p1:1; + uint64_t p2n1_po:1; + uint64_t p2n1_no:1; + uint64_t p2n1_co:1; + uint64_t p2n0_po:1; + uint64_t p2n0_no:1; + uint64_t p2n0_co:1; + uint64_t p2n0_c0:1; + uint64_t p2n0_c1:1; + uint64_t p2n0_n:1; + uint64_t p2n0_p0:1; + uint64_t p2n0_p1:1; + uint64_t p2n1_c0:1; + uint64_t p2n1_c1:1; + uint64_t p2n1_n:1; + uint64_t p2n1_p0:1; + uint64_t p2n1_p1:1; + uint64_t csm0:1; + uint64_t csm1:1; + uint64_t dif0:1; + uint64_t dif1:1; + uint64_t dif2:1; + uint64_t dif3:1; + uint64_t dif4:1; + uint64_t msi:1; + uint64_t ncb_cmd:1; + } cn56xxp1; +}; + +union cvmx_npei_bist_status2 { + uint64_t u64; + struct cvmx_npei_bist_status2_s { + uint64_t reserved_5_63:59; + uint64_t psc_p0:1; + uint64_t psc_p1:1; + uint64_t pkt_gd:1; + uint64_t pkt_gl:1; + uint64_t pkt_blk:1; + } s; + struct cvmx_npei_bist_status2_s cn52xx; + struct cvmx_npei_bist_status2_s cn56xx; +}; + +union cvmx_npei_ctl_port0 { + uint64_t u64; + struct cvmx_npei_ctl_port0_s { + uint64_t reserved_21_63:43; + uint64_t waitl_com:1; + uint64_t intd:1; + 
uint64_t intc:1; + uint64_t intb:1; + uint64_t inta:1; + uint64_t intd_map:2; + uint64_t intc_map:2; + uint64_t intb_map:2; + uint64_t inta_map:2; + uint64_t ctlp_ro:1; + uint64_t reserved_6_6:1; + uint64_t ptlp_ro:1; + uint64_t bar2_enb:1; + uint64_t bar2_esx:2; + uint64_t bar2_cax:1; + uint64_t wait_com:1; + } s; + struct cvmx_npei_ctl_port0_s cn52xx; + struct cvmx_npei_ctl_port0_s cn52xxp1; + struct cvmx_npei_ctl_port0_s cn56xx; + struct cvmx_npei_ctl_port0_s cn56xxp1; +}; + +union cvmx_npei_ctl_port1 { + uint64_t u64; + struct cvmx_npei_ctl_port1_s { + uint64_t reserved_21_63:43; + uint64_t waitl_com:1; + uint64_t intd:1; + uint64_t intc:1; + uint64_t intb:1; + uint64_t inta:1; + uint64_t intd_map:2; + uint64_t intc_map:2; + uint64_t intb_map:2; + uint64_t inta_map:2; + uint64_t ctlp_ro:1; + uint64_t reserved_6_6:1; + uint64_t ptlp_ro:1; + uint64_t bar2_enb:1; + uint64_t bar2_esx:2; + uint64_t bar2_cax:1; + uint64_t wait_com:1; + } s; + struct cvmx_npei_ctl_port1_s cn52xx; + struct cvmx_npei_ctl_port1_s cn52xxp1; + struct cvmx_npei_ctl_port1_s cn56xx; + struct cvmx_npei_ctl_port1_s cn56xxp1; +}; + +union cvmx_npei_ctl_status { + uint64_t u64; + struct cvmx_npei_ctl_status_s { + uint64_t reserved_44_63:20; + uint64_t p1_ntags:6; + uint64_t p0_ntags:6; + uint64_t cfg_rtry:16; + uint64_t ring_en:1; + uint64_t lnk_rst:1; + uint64_t arb:1; + uint64_t pkt_bp:4; + uint64_t host_mode:1; + uint64_t chip_rev:8; + } s; + struct cvmx_npei_ctl_status_s cn52xx; + struct cvmx_npei_ctl_status_cn52xxp1 { + uint64_t reserved_44_63:20; + uint64_t p1_ntags:6; + uint64_t p0_ntags:6; + uint64_t cfg_rtry:16; + uint64_t reserved_15_15:1; + uint64_t lnk_rst:1; + uint64_t arb:1; + uint64_t reserved_9_12:4; + uint64_t host_mode:1; + uint64_t chip_rev:8; + } cn52xxp1; + struct cvmx_npei_ctl_status_s cn56xx; + struct cvmx_npei_ctl_status_cn56xxp1 { + uint64_t reserved_16_63:48; + uint64_t ring_en:1; + uint64_t lnk_rst:1; + uint64_t arb:1; + uint64_t pkt_bp:4; + uint64_t host_mode:1; + uint64_t chip_rev:8; + } cn56xxp1; +}; + +union cvmx_npei_ctl_status2 { + uint64_t u64; + struct cvmx_npei_ctl_status2_s { + uint64_t reserved_16_63:48; + uint64_t mps:1; + uint64_t mrrs:3; + uint64_t c1_w_flt:1; + uint64_t c0_w_flt:1; + uint64_t c1_b1_s:3; + uint64_t c0_b1_s:3; + uint64_t c1_wi_d:1; + uint64_t c1_b0_d:1; + uint64_t c0_wi_d:1; + uint64_t c0_b0_d:1; + } s; + struct cvmx_npei_ctl_status2_s cn52xx; + struct cvmx_npei_ctl_status2_s cn52xxp1; + struct cvmx_npei_ctl_status2_s cn56xx; + struct cvmx_npei_ctl_status2_s cn56xxp1; +}; + +union cvmx_npei_data_out_cnt { + uint64_t u64; + struct cvmx_npei_data_out_cnt_s { + uint64_t reserved_44_63:20; + uint64_t p1_ucnt:16; + uint64_t p1_fcnt:6; + uint64_t p0_ucnt:16; + uint64_t p0_fcnt:6; + } s; + struct cvmx_npei_data_out_cnt_s cn52xx; + struct cvmx_npei_data_out_cnt_s cn52xxp1; + struct cvmx_npei_data_out_cnt_s cn56xx; + struct cvmx_npei_data_out_cnt_s cn56xxp1; +}; + +union cvmx_npei_dbg_data { + uint64_t u64; + struct cvmx_npei_dbg_data_s { + uint64_t reserved_28_63:36; + uint64_t qlm0_rev_lanes:1; + uint64_t reserved_25_26:2; + uint64_t qlm1_spd:2; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } s; + struct cvmx_npei_dbg_data_cn52xx { + uint64_t reserved_29_63:35; + uint64_t qlm0_link_width:1; + uint64_t qlm0_rev_lanes:1; + uint64_t qlm1_mode:2; + uint64_t qlm1_spd:2; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn52xx; + struct cvmx_npei_dbg_data_cn52xx cn52xxp1; + struct cvmx_npei_dbg_data_cn56xx { + uint64_t reserved_29_63:35; + 
uint64_t qlm2_rev_lanes:1; + uint64_t qlm0_rev_lanes:1; + uint64_t qlm3_spd:2; + uint64_t qlm1_spd:2; + uint64_t c_mul:5; + uint64_t dsel_ext:1; + uint64_t data:17; + } cn56xx; + struct cvmx_npei_dbg_data_cn56xx cn56xxp1; +}; + +union cvmx_npei_dbg_select { + uint64_t u64; + struct cvmx_npei_dbg_select_s { + uint64_t reserved_16_63:48; + uint64_t dbg_sel:16; + } s; + struct cvmx_npei_dbg_select_s cn52xx; + struct cvmx_npei_dbg_select_s cn52xxp1; + struct cvmx_npei_dbg_select_s cn56xx; + struct cvmx_npei_dbg_select_s cn56xxp1; +}; + +union cvmx_npei_dmax_counts { + uint64_t u64; + struct cvmx_npei_dmax_counts_s { + uint64_t reserved_39_63:25; + uint64_t fcnt:7; + uint64_t dbell:32; + } s; + struct cvmx_npei_dmax_counts_s cn52xx; + struct cvmx_npei_dmax_counts_s cn52xxp1; + struct cvmx_npei_dmax_counts_s cn56xx; + struct cvmx_npei_dmax_counts_s cn56xxp1; +}; + +union cvmx_npei_dmax_dbell { + uint32_t u32; + struct cvmx_npei_dmax_dbell_s { + uint32_t reserved_16_31:16; + uint32_t dbell:16; + } s; + struct cvmx_npei_dmax_dbell_s cn52xx; + struct cvmx_npei_dmax_dbell_s cn52xxp1; + struct cvmx_npei_dmax_dbell_s cn56xx; + struct cvmx_npei_dmax_dbell_s cn56xxp1; +}; + +union cvmx_npei_dmax_ibuff_saddr { + uint64_t u64; + struct cvmx_npei_dmax_ibuff_saddr_s { + uint64_t reserved_37_63:27; + uint64_t idle:1; + uint64_t saddr:29; + uint64_t reserved_0_6:7; + } s; + struct cvmx_npei_dmax_ibuff_saddr_cn52xx { + uint64_t reserved_36_63:28; + uint64_t saddr:29; + uint64_t reserved_0_6:7; + } cn52xx; + struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn52xxp1; + struct cvmx_npei_dmax_ibuff_saddr_s cn56xx; + struct cvmx_npei_dmax_ibuff_saddr_cn52xx cn56xxp1; +}; + +union cvmx_npei_dmax_naddr { + uint64_t u64; + struct cvmx_npei_dmax_naddr_s { + uint64_t reserved_36_63:28; + uint64_t addr:36; + } s; + struct cvmx_npei_dmax_naddr_s cn52xx; + struct cvmx_npei_dmax_naddr_s cn52xxp1; + struct cvmx_npei_dmax_naddr_s cn56xx; + struct cvmx_npei_dmax_naddr_s cn56xxp1; +}; + +union cvmx_npei_dma0_int_level { + uint64_t u64; + struct cvmx_npei_dma0_int_level_s { + uint64_t time:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_dma0_int_level_s cn52xx; + struct cvmx_npei_dma0_int_level_s cn52xxp1; + struct cvmx_npei_dma0_int_level_s cn56xx; + struct cvmx_npei_dma0_int_level_s cn56xxp1; +}; + +union cvmx_npei_dma1_int_level { + uint64_t u64; + struct cvmx_npei_dma1_int_level_s { + uint64_t time:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_dma1_int_level_s cn52xx; + struct cvmx_npei_dma1_int_level_s cn52xxp1; + struct cvmx_npei_dma1_int_level_s cn56xx; + struct cvmx_npei_dma1_int_level_s cn56xxp1; +}; + +union cvmx_npei_dma_cnts { + uint64_t u64; + struct cvmx_npei_dma_cnts_s { + uint64_t dma1:32; + uint64_t dma0:32; + } s; + struct cvmx_npei_dma_cnts_s cn52xx; + struct cvmx_npei_dma_cnts_s cn52xxp1; + struct cvmx_npei_dma_cnts_s cn56xx; + struct cvmx_npei_dma_cnts_s cn56xxp1; +}; + +union cvmx_npei_dma_control { + uint64_t u64; + struct cvmx_npei_dma_control_s { + uint64_t reserved_39_63:25; + uint64_t dma4_enb:1; + uint64_t dma3_enb:1; + uint64_t dma2_enb:1; + uint64_t dma1_enb:1; + uint64_t dma0_enb:1; + uint64_t b0_lend:1; + uint64_t dwb_denb:1; + uint64_t dwb_ichk:9; + uint64_t fpa_que:3; + uint64_t o_add1:1; + uint64_t o_ro:1; + uint64_t o_ns:1; + uint64_t o_es:2; + uint64_t o_mode:1; + uint64_t csize:14; + } s; + struct cvmx_npei_dma_control_s cn52xx; + struct cvmx_npei_dma_control_cn52xxp1 { + uint64_t reserved_38_63:26; + uint64_t dma3_enb:1; + uint64_t dma2_enb:1; + uint64_t dma1_enb:1; + uint64_t dma0_enb:1; + 
uint64_t b0_lend:1; + uint64_t dwb_denb:1; + uint64_t dwb_ichk:9; + uint64_t fpa_que:3; + uint64_t o_add1:1; + uint64_t o_ro:1; + uint64_t o_ns:1; + uint64_t o_es:2; + uint64_t o_mode:1; + uint64_t csize:14; + } cn52xxp1; + struct cvmx_npei_dma_control_s cn56xx; + struct cvmx_npei_dma_control_s cn56xxp1; +}; + +union cvmx_npei_int_a_enb { + uint64_t u64; + struct cvmx_npei_int_a_enb_s { + uint64_t reserved_10_63:54; + uint64_t pout_err:1; + uint64_t pin_bp:1; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } s; + struct cvmx_npei_int_a_enb_cn52xx { + uint64_t reserved_8_63:56; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xx; + struct cvmx_npei_int_a_enb_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xxp1; + struct cvmx_npei_int_a_enb_s cn56xx; +}; + +union cvmx_npei_int_a_enb2 { + uint64_t u64; + struct cvmx_npei_int_a_enb2_s { + uint64_t reserved_10_63:54; + uint64_t pout_err:1; + uint64_t pin_bp:1; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } s; + struct cvmx_npei_int_a_enb2_cn52xx { + uint64_t reserved_8_63:56; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t reserved_0_1:2; + } cn52xx; + struct cvmx_npei_int_a_enb2_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xxp1; + struct cvmx_npei_int_a_enb2_s cn56xx; +}; + +union cvmx_npei_int_a_sum { + uint64_t u64; + struct cvmx_npei_int_a_sum_s { + uint64_t reserved_10_63:54; + uint64_t pout_err:1; + uint64_t pin_bp:1; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } s; + struct cvmx_npei_int_a_sum_cn52xx { + uint64_t reserved_8_63:56; + uint64_t p1_rdlk:1; + uint64_t p0_rdlk:1; + uint64_t pgl_err:1; + uint64_t pdi_err:1; + uint64_t pop_err:1; + uint64_t pins_err:1; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xx; + struct cvmx_npei_int_a_sum_cn52xxp1 { + uint64_t reserved_2_63:62; + uint64_t dma1_cpl:1; + uint64_t dma0_cpl:1; + } cn52xxp1; + struct cvmx_npei_int_a_sum_s cn56xx; +}; + +union cvmx_npei_int_enb { + uint64_t u64; + struct cvmx_npei_int_enb_s { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + 
uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_enb_s cn52xx; + struct cvmx_npei_int_enb_cn52xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn52xxp1; + struct cvmx_npei_int_enb_s cn56xx; + struct cvmx_npei_int_enb_cn56xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_61_62:2; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t reserved_29_29:1; + uint64_t c1_se:1; + uint64_t reserved_27_27:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t reserved_22_22:1; + uint64_t c0_se:1; + uint64_t reserved_20_20:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t 
rml_wto:1; + uint64_t rml_rto:1; + } cn56xxp1; +}; + +union cvmx_npei_int_enb2 { + uint64_t u64; + struct cvmx_npei_int_enb2_s { + uint64_t reserved_62_63:2; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_enb2_s cn52xx; + struct cvmx_npei_int_enb2_cn52xxp1 { + uint64_t reserved_62_63:2; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn52xxp1; + struct cvmx_npei_int_enb2_s cn56xx; + struct cvmx_npei_int_enb2_cn56xxp1 { + uint64_t reserved_61_63:3; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t 
c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t reserved_29_29:1; + uint64_t c1_se:1; + uint64_t reserved_27_27:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t reserved_22_22:1; + uint64_t c0_se:1; + uint64_t reserved_20_20:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn56xxp1; +}; + +union cvmx_npei_int_info { + uint64_t u64; + struct cvmx_npei_int_info_s { + uint64_t reserved_12_63:52; + uint64_t pidbof:6; + uint64_t psldbof:6; + } s; + struct cvmx_npei_int_info_s cn52xx; + struct cvmx_npei_int_info_s cn56xx; + struct cvmx_npei_int_info_s cn56xxp1; +}; + +union cvmx_npei_int_sum { + uint64_t u64; + struct cvmx_npei_int_sum_s { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_sum_s cn52xx; + struct cvmx_npei_int_sum_cn52xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t 
c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t reserved_15_18:4; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn52xxp1; + struct cvmx_npei_int_sum_s cn56xx; + struct cvmx_npei_int_sum_cn56xxp1 { + uint64_t mio_inta:1; + uint64_t reserved_61_62:2; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t reserved_29_29:1; + uint64_t c1_se:1; + uint64_t reserved_27_27:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t reserved_22_22:1; + uint64_t c0_se:1; + uint64_t reserved_20_20:1; + uint64_t c0_aeri:1; + uint64_t ptime:1; + uint64_t pcnt:1; + uint64_t pidbof:1; + uint64_t psldbof:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t dma4dbo:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn56xxp1; +}; + +union cvmx_npei_int_sum2 { + uint64_t u64; + struct cvmx_npei_int_sum2_s { + uint64_t mio_inta:1; + uint64_t reserved_62_62:1; + uint64_t int_a:1; + uint64_t c1_ldwn:1; + uint64_t c0_ldwn:1; + uint64_t c1_exc:1; + uint64_t c0_exc:1; + uint64_t c1_up_wf:1; + uint64_t c0_up_wf:1; + uint64_t c1_un_wf:1; + uint64_t c0_un_wf:1; + uint64_t c1_un_bx:1; + uint64_t c1_un_wi:1; + uint64_t c1_un_b2:1; + uint64_t c1_un_b1:1; + uint64_t c1_un_b0:1; + uint64_t c1_up_bx:1; + uint64_t c1_up_wi:1; + uint64_t c1_up_b2:1; + uint64_t c1_up_b1:1; + uint64_t c1_up_b0:1; + uint64_t c0_un_bx:1; + uint64_t c0_un_wi:1; + uint64_t c0_un_b2:1; + uint64_t c0_un_b1:1; + uint64_t c0_un_b0:1; + uint64_t c0_up_bx:1; + uint64_t c0_up_wi:1; + uint64_t c0_up_b2:1; + uint64_t c0_up_b1:1; + uint64_t c0_up_b0:1; + uint64_t c1_hpint:1; + uint64_t c1_pmei:1; + uint64_t c1_wake:1; + uint64_t crs1_dr:1; + uint64_t c1_se:1; + uint64_t crs1_er:1; + uint64_t c1_aeri:1; + uint64_t c0_hpint:1; + uint64_t c0_pmei:1; + uint64_t c0_wake:1; + uint64_t crs0_dr:1; + uint64_t c0_se:1; + uint64_t crs0_er:1; + uint64_t c0_aeri:1; + uint64_t reserved_15_18:4; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t dma1fi:1; + uint64_t dma0fi:1; + uint64_t reserved_8_8:1; + uint64_t dma3dbo:1; + uint64_t dma2dbo:1; + uint64_t dma1dbo:1; + uint64_t dma0dbo:1; + 
uint64_t iob2big:1; + uint64_t bar0_to:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npei_int_sum2_s cn52xx; + struct cvmx_npei_int_sum2_s cn52xxp1; + struct cvmx_npei_int_sum2_s cn56xx; +}; + +union cvmx_npei_last_win_rdata0 { + uint64_t u64; + struct cvmx_npei_last_win_rdata0_s { + uint64_t data:64; + } s; + struct cvmx_npei_last_win_rdata0_s cn52xx; + struct cvmx_npei_last_win_rdata0_s cn52xxp1; + struct cvmx_npei_last_win_rdata0_s cn56xx; + struct cvmx_npei_last_win_rdata0_s cn56xxp1; +}; + +union cvmx_npei_last_win_rdata1 { + uint64_t u64; + struct cvmx_npei_last_win_rdata1_s { + uint64_t data:64; + } s; + struct cvmx_npei_last_win_rdata1_s cn52xx; + struct cvmx_npei_last_win_rdata1_s cn52xxp1; + struct cvmx_npei_last_win_rdata1_s cn56xx; + struct cvmx_npei_last_win_rdata1_s cn56xxp1; +}; + +union cvmx_npei_mem_access_ctl { + uint64_t u64; + struct cvmx_npei_mem_access_ctl_s { + uint64_t reserved_14_63:50; + uint64_t max_word:4; + uint64_t timer:10; + } s; + struct cvmx_npei_mem_access_ctl_s cn52xx; + struct cvmx_npei_mem_access_ctl_s cn52xxp1; + struct cvmx_npei_mem_access_ctl_s cn56xx; + struct cvmx_npei_mem_access_ctl_s cn56xxp1; +}; + +union cvmx_npei_mem_access_subidx { + uint64_t u64; + struct cvmx_npei_mem_access_subidx_s { + uint64_t reserved_42_63:22; + uint64_t zero:1; + uint64_t port:2; + uint64_t nmerge:1; + uint64_t esr:2; + uint64_t esw:2; + uint64_t nsr:1; + uint64_t nsw:1; + uint64_t ror:1; + uint64_t row:1; + uint64_t ba:30; + } s; + struct cvmx_npei_mem_access_subidx_s cn52xx; + struct cvmx_npei_mem_access_subidx_s cn52xxp1; + struct cvmx_npei_mem_access_subidx_s cn56xx; + struct cvmx_npei_mem_access_subidx_s cn56xxp1; +}; + +union cvmx_npei_msi_enb0 { + uint64_t u64; + struct cvmx_npei_msi_enb0_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb0_s cn52xx; + struct cvmx_npei_msi_enb0_s cn52xxp1; + struct cvmx_npei_msi_enb0_s cn56xx; + struct cvmx_npei_msi_enb0_s cn56xxp1; +}; + +union cvmx_npei_msi_enb1 { + uint64_t u64; + struct cvmx_npei_msi_enb1_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb1_s cn52xx; + struct cvmx_npei_msi_enb1_s cn52xxp1; + struct cvmx_npei_msi_enb1_s cn56xx; + struct cvmx_npei_msi_enb1_s cn56xxp1; +}; + +union cvmx_npei_msi_enb2 { + uint64_t u64; + struct cvmx_npei_msi_enb2_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb2_s cn52xx; + struct cvmx_npei_msi_enb2_s cn52xxp1; + struct cvmx_npei_msi_enb2_s cn56xx; + struct cvmx_npei_msi_enb2_s cn56xxp1; +}; + +union cvmx_npei_msi_enb3 { + uint64_t u64; + struct cvmx_npei_msi_enb3_s { + uint64_t enb:64; + } s; + struct cvmx_npei_msi_enb3_s cn52xx; + struct cvmx_npei_msi_enb3_s cn52xxp1; + struct cvmx_npei_msi_enb3_s cn56xx; + struct cvmx_npei_msi_enb3_s cn56xxp1; +}; + +union cvmx_npei_msi_rcv0 { + uint64_t u64; + struct cvmx_npei_msi_rcv0_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv0_s cn52xx; + struct cvmx_npei_msi_rcv0_s cn52xxp1; + struct cvmx_npei_msi_rcv0_s cn56xx; + struct cvmx_npei_msi_rcv0_s cn56xxp1; +}; + +union cvmx_npei_msi_rcv1 { + uint64_t u64; + struct cvmx_npei_msi_rcv1_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv1_s cn52xx; + struct cvmx_npei_msi_rcv1_s cn52xxp1; + struct cvmx_npei_msi_rcv1_s cn56xx; + struct cvmx_npei_msi_rcv1_s cn56xxp1; +}; + +union cvmx_npei_msi_rcv2 { + uint64_t u64; + struct cvmx_npei_msi_rcv2_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv2_s cn52xx; + struct cvmx_npei_msi_rcv2_s cn52xxp1; + struct cvmx_npei_msi_rcv2_s cn56xx; + struct cvmx_npei_msi_rcv2_s cn56xxp1; +}; + 
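/*
 * Editorial usage sketch (not part of this patch): the CVMX_NPEI_* offset
 * macros defined earlier in this header are normally paired with the
 * bitfield unions around them -- the raw 64-bit CSR value is loaded into
 * the union's u64 member and then decoded through the per-chip structs.
 * The npei_base parameter and the use of cvmx_read_csr() for this register
 * block are assumptions for illustration only; consult the Octeon SDK for
 * how NPEI CSRs are actually mapped on a given chip.
 */
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-npei-defs.h>

/* Hypothetical helper: report whether the NPEI block is in host mode. */
static int npei_is_host_mode(uint64_t npei_base)
{
	union cvmx_npei_ctl_status ctl;

	/* Read the raw CSR value, then decode it through the union. */
	ctl.u64 = cvmx_read_csr(npei_base + CVMX_NPEI_CTL_STATUS);
	return ctl.s.host_mode;
}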
+union cvmx_npei_msi_rcv3 { + uint64_t u64; + struct cvmx_npei_msi_rcv3_s { + uint64_t intr:64; + } s; + struct cvmx_npei_msi_rcv3_s cn52xx; + struct cvmx_npei_msi_rcv3_s cn52xxp1; + struct cvmx_npei_msi_rcv3_s cn56xx; + struct cvmx_npei_msi_rcv3_s cn56xxp1; +}; + +union cvmx_npei_msi_rd_map { + uint64_t u64; + struct cvmx_npei_msi_rd_map_s { + uint64_t reserved_16_63:48; + uint64_t rd_int:8; + uint64_t msi_int:8; + } s; + struct cvmx_npei_msi_rd_map_s cn52xx; + struct cvmx_npei_msi_rd_map_s cn52xxp1; + struct cvmx_npei_msi_rd_map_s cn56xx; + struct cvmx_npei_msi_rd_map_s cn56xxp1; +}; + +union cvmx_npei_msi_w1c_enb0 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb0_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb0_s cn52xx; + struct cvmx_npei_msi_w1c_enb0_s cn56xx; +}; + +union cvmx_npei_msi_w1c_enb1 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb1_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb1_s cn52xx; + struct cvmx_npei_msi_w1c_enb1_s cn56xx; +}; + +union cvmx_npei_msi_w1c_enb2 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb2_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb2_s cn52xx; + struct cvmx_npei_msi_w1c_enb2_s cn56xx; +}; + +union cvmx_npei_msi_w1c_enb3 { + uint64_t u64; + struct cvmx_npei_msi_w1c_enb3_s { + uint64_t clr:64; + } s; + struct cvmx_npei_msi_w1c_enb3_s cn52xx; + struct cvmx_npei_msi_w1c_enb3_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb0 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb0_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb0_s cn52xx; + struct cvmx_npei_msi_w1s_enb0_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb1 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb1_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb1_s cn52xx; + struct cvmx_npei_msi_w1s_enb1_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb2 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb2_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb2_s cn52xx; + struct cvmx_npei_msi_w1s_enb2_s cn56xx; +}; + +union cvmx_npei_msi_w1s_enb3 { + uint64_t u64; + struct cvmx_npei_msi_w1s_enb3_s { + uint64_t set:64; + } s; + struct cvmx_npei_msi_w1s_enb3_s cn52xx; + struct cvmx_npei_msi_w1s_enb3_s cn56xx; +}; + +union cvmx_npei_msi_wr_map { + uint64_t u64; + struct cvmx_npei_msi_wr_map_s { + uint64_t reserved_16_63:48; + uint64_t ciu_int:8; + uint64_t msi_int:8; + } s; + struct cvmx_npei_msi_wr_map_s cn52xx; + struct cvmx_npei_msi_wr_map_s cn52xxp1; + struct cvmx_npei_msi_wr_map_s cn56xx; + struct cvmx_npei_msi_wr_map_s cn56xxp1; +}; + +union cvmx_npei_pcie_credit_cnt { + uint64_t u64; + struct cvmx_npei_pcie_credit_cnt_s { + uint64_t reserved_48_63:16; + uint64_t p1_ccnt:8; + uint64_t p1_ncnt:8; + uint64_t p1_pcnt:8; + uint64_t p0_ccnt:8; + uint64_t p0_ncnt:8; + uint64_t p0_pcnt:8; + } s; + struct cvmx_npei_pcie_credit_cnt_s cn52xx; + struct cvmx_npei_pcie_credit_cnt_s cn56xx; +}; + +union cvmx_npei_pcie_msi_rcv { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_s { + uint64_t reserved_8_63:56; + uint64_t intr:8; + } s; + struct cvmx_npei_pcie_msi_rcv_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_s cn56xxp1; +}; + +union cvmx_npei_pcie_msi_rcv_b1 { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_b1_s { + uint64_t reserved_16_63:48; + uint64_t intr:8; + uint64_t reserved_0_7:8; + } s; + struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1; 
+}; + +union cvmx_npei_pcie_msi_rcv_b2 { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_b2_s { + uint64_t reserved_24_63:40; + uint64_t intr:8; + uint64_t reserved_0_15:16; + } s; + struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1; +}; + +union cvmx_npei_pcie_msi_rcv_b3 { + uint64_t u64; + struct cvmx_npei_pcie_msi_rcv_b3_s { + uint64_t reserved_32_63:32; + uint64_t intr:8; + uint64_t reserved_0_23:24; + } s; + struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx; + struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1; + struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx; + struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1; +}; + +union cvmx_npei_pktx_cnts { + uint64_t u64; + struct cvmx_npei_pktx_cnts_s { + uint64_t reserved_54_63:10; + uint64_t timer:22; + uint64_t cnt:32; + } s; + struct cvmx_npei_pktx_cnts_s cn52xx; + struct cvmx_npei_pktx_cnts_s cn56xx; + struct cvmx_npei_pktx_cnts_s cn56xxp1; +}; + +union cvmx_npei_pktx_in_bp { + uint64_t u64; + struct cvmx_npei_pktx_in_bp_s { + uint64_t wmark:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_pktx_in_bp_s cn52xx; + struct cvmx_npei_pktx_in_bp_s cn56xx; + struct cvmx_npei_pktx_in_bp_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_baddr { + uint64_t u64; + struct cvmx_npei_pktx_instr_baddr_s { + uint64_t addr:61; + uint64_t reserved_0_2:3; + } s; + struct cvmx_npei_pktx_instr_baddr_s cn52xx; + struct cvmx_npei_pktx_instr_baddr_s cn56xx; + struct cvmx_npei_pktx_instr_baddr_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_baoff_dbell { + uint64_t u64; + struct cvmx_npei_pktx_instr_baoff_dbell_s { + uint64_t aoff:32; + uint64_t dbell:32; + } s; + struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx; + struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx; + struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_fifo_rsize { + uint64_t u64; + struct cvmx_npei_pktx_instr_fifo_rsize_s { + uint64_t max:9; + uint64_t rrp:9; + uint64_t wrp:9; + uint64_t fcnt:5; + uint64_t rsize:32; + } s; + struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx; + struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx; + struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xxp1; +}; + +union cvmx_npei_pktx_instr_header { + uint64_t u64; + struct cvmx_npei_pktx_instr_header_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npei_pktx_instr_header_s cn52xx; + struct cvmx_npei_pktx_instr_header_s cn56xx; + struct cvmx_npei_pktx_instr_header_s cn56xxp1; +}; + +union cvmx_npei_pktx_slist_baddr { + uint64_t u64; + struct cvmx_npei_pktx_slist_baddr_s { + uint64_t addr:60; + uint64_t reserved_0_3:4; + } s; + struct cvmx_npei_pktx_slist_baddr_s cn52xx; + struct cvmx_npei_pktx_slist_baddr_s cn56xx; + struct cvmx_npei_pktx_slist_baddr_s cn56xxp1; +}; + +union cvmx_npei_pktx_slist_baoff_dbell { + uint64_t u64; + struct cvmx_npei_pktx_slist_baoff_dbell_s { + uint64_t aoff:32; + uint64_t dbell:32; + } s; + struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx; + struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx; + struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xxp1; +}; + +union cvmx_npei_pktx_slist_fifo_rsize { + uint64_t u64; + struct cvmx_npei_pktx_slist_fifo_rsize_s { + uint64_t reserved_32_63:32; + uint64_t rsize:32; + } s; + struct 
cvmx_npei_pktx_slist_fifo_rsize_s cn52xx; + struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx; + struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xxp1; +}; + +union cvmx_npei_pkt_cnt_int { + uint64_t u64; + struct cvmx_npei_pkt_cnt_int_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } s; + struct cvmx_npei_pkt_cnt_int_s cn52xx; + struct cvmx_npei_pkt_cnt_int_s cn56xx; + struct cvmx_npei_pkt_cnt_int_s cn56xxp1; +}; + +union cvmx_npei_pkt_cnt_int_enb { + uint64_t u64; + struct cvmx_npei_pkt_cnt_int_enb_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } s; + struct cvmx_npei_pkt_cnt_int_enb_s cn52xx; + struct cvmx_npei_pkt_cnt_int_enb_s cn56xx; + struct cvmx_npei_pkt_cnt_int_enb_s cn56xxp1; +}; + +union cvmx_npei_pkt_data_out_es { + uint64_t u64; + struct cvmx_npei_pkt_data_out_es_s { + uint64_t es:64; + } s; + struct cvmx_npei_pkt_data_out_es_s cn52xx; + struct cvmx_npei_pkt_data_out_es_s cn56xx; + struct cvmx_npei_pkt_data_out_es_s cn56xxp1; +}; + +union cvmx_npei_pkt_data_out_ns { + uint64_t u64; + struct cvmx_npei_pkt_data_out_ns_s { + uint64_t reserved_32_63:32; + uint64_t nsr:32; + } s; + struct cvmx_npei_pkt_data_out_ns_s cn52xx; + struct cvmx_npei_pkt_data_out_ns_s cn56xx; + struct cvmx_npei_pkt_data_out_ns_s cn56xxp1; +}; + +union cvmx_npei_pkt_data_out_ror { + uint64_t u64; + struct cvmx_npei_pkt_data_out_ror_s { + uint64_t reserved_32_63:32; + uint64_t ror:32; + } s; + struct cvmx_npei_pkt_data_out_ror_s cn52xx; + struct cvmx_npei_pkt_data_out_ror_s cn56xx; + struct cvmx_npei_pkt_data_out_ror_s cn56xxp1; +}; + +union cvmx_npei_pkt_dpaddr { + uint64_t u64; + struct cvmx_npei_pkt_dpaddr_s { + uint64_t reserved_32_63:32; + uint64_t dptr:32; + } s; + struct cvmx_npei_pkt_dpaddr_s cn52xx; + struct cvmx_npei_pkt_dpaddr_s cn56xx; + struct cvmx_npei_pkt_dpaddr_s cn56xxp1; +}; + +union cvmx_npei_pkt_in_bp { + uint64_t u64; + struct cvmx_npei_pkt_in_bp_s { + uint64_t reserved_32_63:32; + uint64_t bp:32; + } s; + struct cvmx_npei_pkt_in_bp_s cn56xx; +}; + +union cvmx_npei_pkt_in_donex_cnts { + uint64_t u64; + struct cvmx_npei_pkt_in_donex_cnts_s { + uint64_t reserved_32_63:32; + uint64_t cnt:32; + } s; + struct cvmx_npei_pkt_in_donex_cnts_s cn52xx; + struct cvmx_npei_pkt_in_donex_cnts_s cn56xx; + struct cvmx_npei_pkt_in_donex_cnts_s cn56xxp1; +}; + +union cvmx_npei_pkt_in_instr_counts { + uint64_t u64; + struct cvmx_npei_pkt_in_instr_counts_s { + uint64_t wr_cnt:32; + uint64_t rd_cnt:32; + } s; + struct cvmx_npei_pkt_in_instr_counts_s cn52xx; + struct cvmx_npei_pkt_in_instr_counts_s cn56xx; +}; + +union cvmx_npei_pkt_in_pcie_port { + uint64_t u64; + struct cvmx_npei_pkt_in_pcie_port_s { + uint64_t pp:64; + } s; + struct cvmx_npei_pkt_in_pcie_port_s cn52xx; + struct cvmx_npei_pkt_in_pcie_port_s cn56xx; +}; + +union cvmx_npei_pkt_input_control { + uint64_t u64; + struct cvmx_npei_pkt_input_control_s { + uint64_t reserved_23_63:41; + uint64_t pkt_rr:1; + uint64_t pbp_dhi:13; + uint64_t d_nsr:1; + uint64_t d_esr:2; + uint64_t d_ror:1; + uint64_t use_csr:1; + uint64_t nsr:1; + uint64_t esr:2; + uint64_t ror:1; + } s; + struct cvmx_npei_pkt_input_control_s cn52xx; + struct cvmx_npei_pkt_input_control_s cn56xx; + struct cvmx_npei_pkt_input_control_s cn56xxp1; +}; + +union cvmx_npei_pkt_instr_enb { + uint64_t u64; + struct cvmx_npei_pkt_instr_enb_s { + uint64_t reserved_32_63:32; + uint64_t enb:32; + } s; + struct cvmx_npei_pkt_instr_enb_s cn52xx; + struct cvmx_npei_pkt_instr_enb_s cn56xx; + struct cvmx_npei_pkt_instr_enb_s cn56xxp1; +}; + +union cvmx_npei_pkt_instr_rd_size { + uint64_t u64; 
+ struct cvmx_npei_pkt_instr_rd_size_s { + uint64_t rdsize:64; + } s; + struct cvmx_npei_pkt_instr_rd_size_s cn52xx; + struct cvmx_npei_pkt_instr_rd_size_s cn56xx; +}; + +union cvmx_npei_pkt_instr_size { + uint64_t u64; + struct cvmx_npei_pkt_instr_size_s { + uint64_t reserved_32_63:32; + uint64_t is_64b:32; + } s; + struct cvmx_npei_pkt_instr_size_s cn52xx; + struct cvmx_npei_pkt_instr_size_s cn56xx; + struct cvmx_npei_pkt_instr_size_s cn56xxp1; +}; + +union cvmx_npei_pkt_int_levels { + uint64_t u64; + struct cvmx_npei_pkt_int_levels_s { + uint64_t reserved_54_63:10; + uint64_t time:22; + uint64_t cnt:32; + } s; + struct cvmx_npei_pkt_int_levels_s cn52xx; + struct cvmx_npei_pkt_int_levels_s cn56xx; + struct cvmx_npei_pkt_int_levels_s cn56xxp1; +}; + +union cvmx_npei_pkt_iptr { + uint64_t u64; + struct cvmx_npei_pkt_iptr_s { + uint64_t reserved_32_63:32; + uint64_t iptr:32; + } s; + struct cvmx_npei_pkt_iptr_s cn52xx; + struct cvmx_npei_pkt_iptr_s cn56xx; + struct cvmx_npei_pkt_iptr_s cn56xxp1; +}; + +union cvmx_npei_pkt_out_bmode { + uint64_t u64; + struct cvmx_npei_pkt_out_bmode_s { + uint64_t reserved_32_63:32; + uint64_t bmode:32; + } s; + struct cvmx_npei_pkt_out_bmode_s cn52xx; + struct cvmx_npei_pkt_out_bmode_s cn56xx; + struct cvmx_npei_pkt_out_bmode_s cn56xxp1; +}; + +union cvmx_npei_pkt_out_enb { + uint64_t u64; + struct cvmx_npei_pkt_out_enb_s { + uint64_t reserved_32_63:32; + uint64_t enb:32; + } s; + struct cvmx_npei_pkt_out_enb_s cn52xx; + struct cvmx_npei_pkt_out_enb_s cn56xx; + struct cvmx_npei_pkt_out_enb_s cn56xxp1; +}; + +union cvmx_npei_pkt_output_wmark { + uint64_t u64; + struct cvmx_npei_pkt_output_wmark_s { + uint64_t reserved_32_63:32; + uint64_t wmark:32; + } s; + struct cvmx_npei_pkt_output_wmark_s cn52xx; + struct cvmx_npei_pkt_output_wmark_s cn56xx; +}; + +union cvmx_npei_pkt_pcie_port { + uint64_t u64; + struct cvmx_npei_pkt_pcie_port_s { + uint64_t pp:64; + } s; + struct cvmx_npei_pkt_pcie_port_s cn52xx; + struct cvmx_npei_pkt_pcie_port_s cn56xx; + struct cvmx_npei_pkt_pcie_port_s cn56xxp1; +}; + +union cvmx_npei_pkt_port_in_rst { + uint64_t u64; + struct cvmx_npei_pkt_port_in_rst_s { + uint64_t in_rst:32; + uint64_t out_rst:32; + } s; + struct cvmx_npei_pkt_port_in_rst_s cn52xx; + struct cvmx_npei_pkt_port_in_rst_s cn56xx; +}; + +union cvmx_npei_pkt_slist_es { + uint64_t u64; + struct cvmx_npei_pkt_slist_es_s { + uint64_t es:64; + } s; + struct cvmx_npei_pkt_slist_es_s cn52xx; + struct cvmx_npei_pkt_slist_es_s cn56xx; + struct cvmx_npei_pkt_slist_es_s cn56xxp1; +}; + +union cvmx_npei_pkt_slist_id_size { + uint64_t u64; + struct cvmx_npei_pkt_slist_id_size_s { + uint64_t reserved_23_63:41; + uint64_t isize:7; + uint64_t bsize:16; + } s; + struct cvmx_npei_pkt_slist_id_size_s cn52xx; + struct cvmx_npei_pkt_slist_id_size_s cn56xx; + struct cvmx_npei_pkt_slist_id_size_s cn56xxp1; +}; + +union cvmx_npei_pkt_slist_ns { + uint64_t u64; + struct cvmx_npei_pkt_slist_ns_s { + uint64_t reserved_32_63:32; + uint64_t nsr:32; + } s; + struct cvmx_npei_pkt_slist_ns_s cn52xx; + struct cvmx_npei_pkt_slist_ns_s cn56xx; + struct cvmx_npei_pkt_slist_ns_s cn56xxp1; +}; + +union cvmx_npei_pkt_slist_ror { + uint64_t u64; + struct cvmx_npei_pkt_slist_ror_s { + uint64_t reserved_32_63:32; + uint64_t ror:32; + } s; + struct cvmx_npei_pkt_slist_ror_s cn52xx; + struct cvmx_npei_pkt_slist_ror_s cn56xx; + struct cvmx_npei_pkt_slist_ror_s cn56xxp1; +}; + +union cvmx_npei_pkt_time_int { + uint64_t u64; + struct cvmx_npei_pkt_time_int_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } 
s; + struct cvmx_npei_pkt_time_int_s cn52xx; + struct cvmx_npei_pkt_time_int_s cn56xx; + struct cvmx_npei_pkt_time_int_s cn56xxp1; +}; + +union cvmx_npei_pkt_time_int_enb { + uint64_t u64; + struct cvmx_npei_pkt_time_int_enb_s { + uint64_t reserved_32_63:32; + uint64_t port:32; + } s; + struct cvmx_npei_pkt_time_int_enb_s cn52xx; + struct cvmx_npei_pkt_time_int_enb_s cn56xx; + struct cvmx_npei_pkt_time_int_enb_s cn56xxp1; +}; + +union cvmx_npei_rsl_int_blocks { + uint64_t u64; + struct cvmx_npei_rsl_int_blocks_s { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asxpcs1:1; + uint64_t asxpcs0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t reserved_18_19:2; + uint64_t lmc0:1; + uint64_t l2c:1; + uint64_t usb1:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t reserved_6_6:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npei:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npei_rsl_int_blocks_s cn52xx; + struct cvmx_npei_rsl_int_blocks_s cn52xxp1; + struct cvmx_npei_rsl_int_blocks_cn56xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asxpcs1:1; + uint64_t asxpcs0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t reserved_18_19:2; + uint64_t lmc0:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t reserved_6_6:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npei:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn56xx; + struct cvmx_npei_rsl_int_blocks_cn56xx cn56xxp1; +}; + +union cvmx_npei_scratch_1 { + uint64_t u64; + struct cvmx_npei_scratch_1_s { + uint64_t data:64; + } s; + struct cvmx_npei_scratch_1_s cn52xx; + struct cvmx_npei_scratch_1_s cn52xxp1; + struct cvmx_npei_scratch_1_s cn56xx; + struct cvmx_npei_scratch_1_s cn56xxp1; +}; + +union cvmx_npei_state1 { + uint64_t u64; + struct cvmx_npei_state1_s { + uint64_t cpl1:12; + uint64_t cpl0:12; + uint64_t arb:1; + uint64_t csr:39; + } s; + struct cvmx_npei_state1_s cn52xx; + struct cvmx_npei_state1_s cn52xxp1; + struct cvmx_npei_state1_s cn56xx; + struct cvmx_npei_state1_s cn56xxp1; +}; + +union cvmx_npei_state2 { + uint64_t u64; + struct cvmx_npei_state2_s { + uint64_t reserved_48_63:16; + uint64_t npei:1; + uint64_t rac:1; + uint64_t csm1:15; + uint64_t csm0:15; + uint64_t nnp0:8; + uint64_t nnd:8; + } s; + struct cvmx_npei_state2_s cn52xx; + struct cvmx_npei_state2_s cn52xxp1; + struct cvmx_npei_state2_s cn56xx; + struct cvmx_npei_state2_s cn56xxp1; +}; + +union cvmx_npei_state3 { + uint64_t u64; + struct cvmx_npei_state3_s { + uint64_t reserved_56_63:8; + uint64_t psm1:15; + uint64_t psm0:15; + uint64_t nsm1:13; + uint64_t nsm0:13; + } s; + struct cvmx_npei_state3_s cn52xx; + struct cvmx_npei_state3_s cn52xxp1; + struct cvmx_npei_state3_s cn56xx; + struct cvmx_npei_state3_s cn56xxp1; +}; + +union cvmx_npei_win_rd_addr { + uint64_t u64; + struct cvmx_npei_win_rd_addr_s { + uint64_t reserved_51_63:13; + uint64_t ld_cmd:2; + uint64_t iobit:1; + uint64_t rd_addr:48; + } s; + struct cvmx_npei_win_rd_addr_s cn52xx; + struct cvmx_npei_win_rd_addr_s cn52xxp1; + struct cvmx_npei_win_rd_addr_s cn56xx; + struct cvmx_npei_win_rd_addr_s cn56xxp1; 
+}; + +union cvmx_npei_win_rd_data { + uint64_t u64; + struct cvmx_npei_win_rd_data_s { + uint64_t rd_data:64; + } s; + struct cvmx_npei_win_rd_data_s cn52xx; + struct cvmx_npei_win_rd_data_s cn52xxp1; + struct cvmx_npei_win_rd_data_s cn56xx; + struct cvmx_npei_win_rd_data_s cn56xxp1; +}; + +union cvmx_npei_win_wr_addr { + uint64_t u64; + struct cvmx_npei_win_wr_addr_s { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t wr_addr:46; + uint64_t reserved_0_1:2; + } s; + struct cvmx_npei_win_wr_addr_s cn52xx; + struct cvmx_npei_win_wr_addr_s cn52xxp1; + struct cvmx_npei_win_wr_addr_s cn56xx; + struct cvmx_npei_win_wr_addr_s cn56xxp1; +}; + +union cvmx_npei_win_wr_data { + uint64_t u64; + struct cvmx_npei_win_wr_data_s { + uint64_t wr_data:64; + } s; + struct cvmx_npei_win_wr_data_s cn52xx; + struct cvmx_npei_win_wr_data_s cn52xxp1; + struct cvmx_npei_win_wr_data_s cn56xx; + struct cvmx_npei_win_wr_data_s cn56xxp1; +}; + +union cvmx_npei_win_wr_mask { + uint64_t u64; + struct cvmx_npei_win_wr_mask_s { + uint64_t reserved_8_63:56; + uint64_t wr_mask:8; + } s; + struct cvmx_npei_win_wr_mask_s cn52xx; + struct cvmx_npei_win_wr_mask_s cn52xxp1; + struct cvmx_npei_win_wr_mask_s cn56xx; + struct cvmx_npei_win_wr_mask_s cn56xxp1; +}; + +union cvmx_npei_window_ctl { + uint64_t u64; + struct cvmx_npei_window_ctl_s { + uint64_t reserved_32_63:32; + uint64_t time:32; + } s; + struct cvmx_npei_window_ctl_s cn52xx; + struct cvmx_npei_window_ctl_s cn52xxp1; + struct cvmx_npei_window_ctl_s cn56xx; + struct cvmx_npei_window_ctl_s cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h new file mode 100644 index 00000000000..4e03cd8561e --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h @@ -0,0 +1,1735 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_NPI_DEFS_H__ +#define __CVMX_NPI_DEFS_H__ + +#define CVMX_NPI_BASE_ADDR_INPUT0 \ + CVMX_ADD_IO_SEG(0x00011F0000000070ull) +#define CVMX_NPI_BASE_ADDR_INPUT1 \ + CVMX_ADD_IO_SEG(0x00011F0000000080ull) +#define CVMX_NPI_BASE_ADDR_INPUT2 \ + CVMX_ADD_IO_SEG(0x00011F0000000090ull) +#define CVMX_NPI_BASE_ADDR_INPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000A0ull) +#define CVMX_NPI_BASE_ADDR_INPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000070ull + (((offset) & 3) * 16)) +#define CVMX_NPI_BASE_ADDR_OUTPUT0 \ + CVMX_ADD_IO_SEG(0x00011F00000000B8ull) +#define CVMX_NPI_BASE_ADDR_OUTPUT1 \ + CVMX_ADD_IO_SEG(0x00011F00000000C0ull) +#define CVMX_NPI_BASE_ADDR_OUTPUT2 \ + CVMX_ADD_IO_SEG(0x00011F00000000C8ull) +#define CVMX_NPI_BASE_ADDR_OUTPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000D0ull) +#define CVMX_NPI_BASE_ADDR_OUTPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000000B8ull + (((offset) & 3) * 8)) +#define CVMX_NPI_BIST_STATUS \ + CVMX_ADD_IO_SEG(0x00011F00000003F8ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT0 \ + CVMX_ADD_IO_SEG(0x00011F00000000E0ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT1 \ + CVMX_ADD_IO_SEG(0x00011F00000000E8ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT2 \ + CVMX_ADD_IO_SEG(0x00011F00000000F0ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000F8ull) +#define CVMX_NPI_BUFF_SIZE_OUTPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000000E0ull + (((offset) & 3) * 8)) +#define CVMX_NPI_COMP_CTL \ + CVMX_ADD_IO_SEG(0x00011F0000000218ull) +#define CVMX_NPI_CTL_STATUS \ + CVMX_ADD_IO_SEG(0x00011F0000000010ull) +#define CVMX_NPI_DBG_SELECT \ + CVMX_ADD_IO_SEG(0x00011F0000000008ull) +#define CVMX_NPI_DMA_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000000128ull) +#define CVMX_NPI_DMA_HIGHP_COUNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000148ull) +#define CVMX_NPI_DMA_HIGHP_NADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000158ull) +#define CVMX_NPI_DMA_LOWP_COUNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000140ull) +#define CVMX_NPI_DMA_LOWP_NADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000150ull) +#define CVMX_NPI_HIGHP_DBELL \ + CVMX_ADD_IO_SEG(0x00011F0000000120ull) +#define CVMX_NPI_HIGHP_IBUFF_SADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000110ull) +#define CVMX_NPI_INPUT_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000000138ull) +#define CVMX_NPI_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000000020ull) +#define CVMX_NPI_INT_SUM \ + CVMX_ADD_IO_SEG(0x00011F0000000018ull) +#define CVMX_NPI_LOWP_DBELL \ + CVMX_ADD_IO_SEG(0x00011F0000000118ull) +#define CVMX_NPI_LOWP_IBUFF_SADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000108ull) +#define CVMX_NPI_MEM_ACCESS_SUBID3 \ + CVMX_ADD_IO_SEG(0x00011F0000000028ull) +#define CVMX_NPI_MEM_ACCESS_SUBID4 \ + CVMX_ADD_IO_SEG(0x00011F0000000030ull) +#define CVMX_NPI_MEM_ACCESS_SUBID5 \ + CVMX_ADD_IO_SEG(0x00011F0000000038ull) +#define CVMX_NPI_MEM_ACCESS_SUBID6 \ + CVMX_ADD_IO_SEG(0x00011F0000000040ull) +#define CVMX_NPI_MEM_ACCESS_SUBIDX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000028ull + (((offset) & 7) * 8) - 8 * 3) +#define CVMX_NPI_MSI_RCV \ + (0x0000000000000190ull) +#define CVMX_NPI_NPI_MSI_RCV \ + CVMX_ADD_IO_SEG(0x00011F0000001190ull) +#define CVMX_NPI_NUM_DESC_OUTPUT0 \ + CVMX_ADD_IO_SEG(0x00011F0000000050ull) +#define CVMX_NPI_NUM_DESC_OUTPUT1 \ + CVMX_ADD_IO_SEG(0x00011F0000000058ull) +#define CVMX_NPI_NUM_DESC_OUTPUT2 \ + CVMX_ADD_IO_SEG(0x00011F0000000060ull) +#define CVMX_NPI_NUM_DESC_OUTPUT3 \ + CVMX_ADD_IO_SEG(0x00011F0000000068ull) +#define 
CVMX_NPI_NUM_DESC_OUTPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000050ull + (((offset) & 3) * 8)) +#define CVMX_NPI_OUTPUT_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000000100ull) +#define CVMX_NPI_P0_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000180ull) +#define CVMX_NPI_P0_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001C0ull) +#define CVMX_NPI_P0_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001A0ull) +#define CVMX_NPI_P0_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000160ull) +#define CVMX_NPI_P1_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000188ull) +#define CVMX_NPI_P1_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001C8ull) +#define CVMX_NPI_P1_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001A8ull) +#define CVMX_NPI_P1_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000168ull) +#define CVMX_NPI_P2_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000190ull) +#define CVMX_NPI_P2_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001D0ull) +#define CVMX_NPI_P2_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001B0ull) +#define CVMX_NPI_P2_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000170ull) +#define CVMX_NPI_P3_DBPAIR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F0000000198ull) +#define CVMX_NPI_P3_INSTR_ADDR \ + CVMX_ADD_IO_SEG(0x00011F00000001D8ull) +#define CVMX_NPI_P3_INSTR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000001B8ull) +#define CVMX_NPI_P3_PAIR_CNTS \ + CVMX_ADD_IO_SEG(0x00011F0000000178ull) +#define CVMX_NPI_PCI_BAR1_INDEXX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000001100ull + (((offset) & 31) * 4)) +#define CVMX_NPI_PCI_BIST_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011C0ull) +#define CVMX_NPI_PCI_BURST_SIZE \ + CVMX_ADD_IO_SEG(0x00011F00000000D8ull) +#define CVMX_NPI_PCI_CFG00 \ + CVMX_ADD_IO_SEG(0x00011F0000001800ull) +#define CVMX_NPI_PCI_CFG01 \ + CVMX_ADD_IO_SEG(0x00011F0000001804ull) +#define CVMX_NPI_PCI_CFG02 \ + CVMX_ADD_IO_SEG(0x00011F0000001808ull) +#define CVMX_NPI_PCI_CFG03 \ + CVMX_ADD_IO_SEG(0x00011F000000180Cull) +#define CVMX_NPI_PCI_CFG04 \ + CVMX_ADD_IO_SEG(0x00011F0000001810ull) +#define CVMX_NPI_PCI_CFG05 \ + CVMX_ADD_IO_SEG(0x00011F0000001814ull) +#define CVMX_NPI_PCI_CFG06 \ + CVMX_ADD_IO_SEG(0x00011F0000001818ull) +#define CVMX_NPI_PCI_CFG07 \ + CVMX_ADD_IO_SEG(0x00011F000000181Cull) +#define CVMX_NPI_PCI_CFG08 \ + CVMX_ADD_IO_SEG(0x00011F0000001820ull) +#define CVMX_NPI_PCI_CFG09 \ + CVMX_ADD_IO_SEG(0x00011F0000001824ull) +#define CVMX_NPI_PCI_CFG10 \ + CVMX_ADD_IO_SEG(0x00011F0000001828ull) +#define CVMX_NPI_PCI_CFG11 \ + CVMX_ADD_IO_SEG(0x00011F000000182Cull) +#define CVMX_NPI_PCI_CFG12 \ + CVMX_ADD_IO_SEG(0x00011F0000001830ull) +#define CVMX_NPI_PCI_CFG13 \ + CVMX_ADD_IO_SEG(0x00011F0000001834ull) +#define CVMX_NPI_PCI_CFG15 \ + CVMX_ADD_IO_SEG(0x00011F000000183Cull) +#define CVMX_NPI_PCI_CFG16 \ + CVMX_ADD_IO_SEG(0x00011F0000001840ull) +#define CVMX_NPI_PCI_CFG17 \ + CVMX_ADD_IO_SEG(0x00011F0000001844ull) +#define CVMX_NPI_PCI_CFG18 \ + CVMX_ADD_IO_SEG(0x00011F0000001848ull) +#define CVMX_NPI_PCI_CFG19 \ + CVMX_ADD_IO_SEG(0x00011F000000184Cull) +#define CVMX_NPI_PCI_CFG20 \ + CVMX_ADD_IO_SEG(0x00011F0000001850ull) +#define CVMX_NPI_PCI_CFG21 \ + CVMX_ADD_IO_SEG(0x00011F0000001854ull) +#define CVMX_NPI_PCI_CFG22 \ + CVMX_ADD_IO_SEG(0x00011F0000001858ull) +#define CVMX_NPI_PCI_CFG56 \ + CVMX_ADD_IO_SEG(0x00011F00000018E0ull) +#define CVMX_NPI_PCI_CFG57 \ + CVMX_ADD_IO_SEG(0x00011F00000018E4ull) +#define CVMX_NPI_PCI_CFG58 \ + CVMX_ADD_IO_SEG(0x00011F00000018E8ull) +#define CVMX_NPI_PCI_CFG59 \ + CVMX_ADD_IO_SEG(0x00011F00000018ECull) +#define CVMX_NPI_PCI_CFG60 \ + CVMX_ADD_IO_SEG(0x00011F00000018F0ull) 
+#define CVMX_NPI_PCI_CFG61 \ + CVMX_ADD_IO_SEG(0x00011F00000018F4ull) +#define CVMX_NPI_PCI_CFG62 \ + CVMX_ADD_IO_SEG(0x00011F00000018F8ull) +#define CVMX_NPI_PCI_CFG63 \ + CVMX_ADD_IO_SEG(0x00011F00000018FCull) +#define CVMX_NPI_PCI_CNT_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011B8ull) +#define CVMX_NPI_PCI_CTL_STATUS_2 \ + CVMX_ADD_IO_SEG(0x00011F000000118Cull) +#define CVMX_NPI_PCI_INT_ARB_CFG \ + CVMX_ADD_IO_SEG(0x00011F0000000130ull) +#define CVMX_NPI_PCI_INT_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F00000011A0ull) +#define CVMX_NPI_PCI_INT_SUM2 \ + CVMX_ADD_IO_SEG(0x00011F0000001198ull) +#define CVMX_NPI_PCI_READ_CMD \ + CVMX_ADD_IO_SEG(0x00011F0000000048ull) +#define CVMX_NPI_PCI_READ_CMD_6 \ + CVMX_ADD_IO_SEG(0x00011F0000001180ull) +#define CVMX_NPI_PCI_READ_CMD_C \ + CVMX_ADD_IO_SEG(0x00011F0000001184ull) +#define CVMX_NPI_PCI_READ_CMD_E \ + CVMX_ADD_IO_SEG(0x00011F0000001188ull) +#define CVMX_NPI_PCI_SCM_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011A8ull) +#define CVMX_NPI_PCI_TSR_REG \ + CVMX_ADD_IO_SEG(0x00011F00000011B0ull) +#define CVMX_NPI_PORT32_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F00000001F8ull) +#define CVMX_NPI_PORT33_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F0000000200ull) +#define CVMX_NPI_PORT34_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F0000000208ull) +#define CVMX_NPI_PORT35_INSTR_HDR \ + CVMX_ADD_IO_SEG(0x00011F0000000210ull) +#define CVMX_NPI_PORT_BP_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F00000001F0ull) +#define CVMX_NPI_PX_DBPAIR_ADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000180ull + (((offset) & 3) * 8)) +#define CVMX_NPI_PX_INSTR_ADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000001C0ull + (((offset) & 3) * 8)) +#define CVMX_NPI_PX_INSTR_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000001A0ull + (((offset) & 3) * 8)) +#define CVMX_NPI_PX_PAIR_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000160ull + (((offset) & 3) * 8)) +#define CVMX_NPI_RSL_INT_BLOCKS \ + CVMX_ADD_IO_SEG(0x00011F0000000000ull) +#define CVMX_NPI_SIZE_INPUT0 \ + CVMX_ADD_IO_SEG(0x00011F0000000078ull) +#define CVMX_NPI_SIZE_INPUT1 \ + CVMX_ADD_IO_SEG(0x00011F0000000088ull) +#define CVMX_NPI_SIZE_INPUT2 \ + CVMX_ADD_IO_SEG(0x00011F0000000098ull) +#define CVMX_NPI_SIZE_INPUT3 \ + CVMX_ADD_IO_SEG(0x00011F00000000A8ull) +#define CVMX_NPI_SIZE_INPUTX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000000078ull + (((offset) & 3) * 16)) +#define CVMX_NPI_WIN_READ_TO \ + CVMX_ADD_IO_SEG(0x00011F00000001E0ull) + +union cvmx_npi_base_addr_inputx { + uint64_t u64; + struct cvmx_npi_base_addr_inputx_s { + uint64_t baddr:61; + uint64_t reserved_0_2:3; + } s; + struct cvmx_npi_base_addr_inputx_s cn30xx; + struct cvmx_npi_base_addr_inputx_s cn31xx; + struct cvmx_npi_base_addr_inputx_s cn38xx; + struct cvmx_npi_base_addr_inputx_s cn38xxp2; + struct cvmx_npi_base_addr_inputx_s cn50xx; + struct cvmx_npi_base_addr_inputx_s cn58xx; + struct cvmx_npi_base_addr_inputx_s cn58xxp1; +}; + +union cvmx_npi_base_addr_outputx { + uint64_t u64; + struct cvmx_npi_base_addr_outputx_s { + uint64_t baddr:61; + uint64_t reserved_0_2:3; + } s; + struct cvmx_npi_base_addr_outputx_s cn30xx; + struct cvmx_npi_base_addr_outputx_s cn31xx; + struct cvmx_npi_base_addr_outputx_s cn38xx; + struct cvmx_npi_base_addr_outputx_s cn38xxp2; + struct cvmx_npi_base_addr_outputx_s cn50xx; + struct cvmx_npi_base_addr_outputx_s cn58xx; + struct cvmx_npi_base_addr_outputx_s cn58xxp1; +}; + +union cvmx_npi_bist_status { + uint64_t u64; + struct cvmx_npi_bist_status_s { + uint64_t reserved_20_63:44; + uint64_t csr_bs:1; + uint64_t dif_bs:1; + uint64_t rdp_bs:1; + uint64_t pcnc_bs:1; + uint64_t 
pcn_bs:1; + uint64_t rdn_bs:1; + uint64_t pcac_bs:1; + uint64_t pcad_bs:1; + uint64_t rdnl_bs:1; + uint64_t pgf_bs:1; + uint64_t pig_bs:1; + uint64_t pof0_bs:1; + uint64_t pof1_bs:1; + uint64_t pof2_bs:1; + uint64_t pof3_bs:1; + uint64_t pos_bs:1; + uint64_t nus_bs:1; + uint64_t dob_bs:1; + uint64_t pdf_bs:1; + uint64_t dpi_bs:1; + } s; + struct cvmx_npi_bist_status_cn30xx { + uint64_t reserved_20_63:44; + uint64_t csr_bs:1; + uint64_t dif_bs:1; + uint64_t rdp_bs:1; + uint64_t pcnc_bs:1; + uint64_t pcn_bs:1; + uint64_t rdn_bs:1; + uint64_t pcac_bs:1; + uint64_t pcad_bs:1; + uint64_t rdnl_bs:1; + uint64_t pgf_bs:1; + uint64_t pig_bs:1; + uint64_t pof0_bs:1; + uint64_t reserved_5_7:3; + uint64_t pos_bs:1; + uint64_t nus_bs:1; + uint64_t dob_bs:1; + uint64_t pdf_bs:1; + uint64_t dpi_bs:1; + } cn30xx; + struct cvmx_npi_bist_status_s cn31xx; + struct cvmx_npi_bist_status_s cn38xx; + struct cvmx_npi_bist_status_s cn38xxp2; + struct cvmx_npi_bist_status_cn50xx { + uint64_t reserved_20_63:44; + uint64_t csr_bs:1; + uint64_t dif_bs:1; + uint64_t rdp_bs:1; + uint64_t pcnc_bs:1; + uint64_t pcn_bs:1; + uint64_t rdn_bs:1; + uint64_t pcac_bs:1; + uint64_t pcad_bs:1; + uint64_t rdnl_bs:1; + uint64_t pgf_bs:1; + uint64_t pig_bs:1; + uint64_t pof0_bs:1; + uint64_t pof1_bs:1; + uint64_t reserved_5_6:2; + uint64_t pos_bs:1; + uint64_t nus_bs:1; + uint64_t dob_bs:1; + uint64_t pdf_bs:1; + uint64_t dpi_bs:1; + } cn50xx; + struct cvmx_npi_bist_status_s cn58xx; + struct cvmx_npi_bist_status_s cn58xxp1; +}; + +union cvmx_npi_buff_size_outputx { + uint64_t u64; + struct cvmx_npi_buff_size_outputx_s { + uint64_t reserved_23_63:41; + uint64_t isize:7; + uint64_t bsize:16; + } s; + struct cvmx_npi_buff_size_outputx_s cn30xx; + struct cvmx_npi_buff_size_outputx_s cn31xx; + struct cvmx_npi_buff_size_outputx_s cn38xx; + struct cvmx_npi_buff_size_outputx_s cn38xxp2; + struct cvmx_npi_buff_size_outputx_s cn50xx; + struct cvmx_npi_buff_size_outputx_s cn58xx; + struct cvmx_npi_buff_size_outputx_s cn58xxp1; +}; + +union cvmx_npi_comp_ctl { + uint64_t u64; + struct cvmx_npi_comp_ctl_s { + uint64_t reserved_10_63:54; + uint64_t pctl:5; + uint64_t nctl:5; + } s; + struct cvmx_npi_comp_ctl_s cn50xx; + struct cvmx_npi_comp_ctl_s cn58xx; + struct cvmx_npi_comp_ctl_s cn58xxp1; +}; + +union cvmx_npi_ctl_status { + uint64_t u64; + struct cvmx_npi_ctl_status_s { + uint64_t reserved_63_63:1; + uint64_t chip_rev:8; + uint64_t dis_pniw:1; + uint64_t out3_enb:1; + uint64_t out2_enb:1; + uint64_t out1_enb:1; + uint64_t out0_enb:1; + uint64_t ins3_enb:1; + uint64_t ins2_enb:1; + uint64_t ins1_enb:1; + uint64_t ins0_enb:1; + uint64_t ins3_64b:1; + uint64_t ins2_64b:1; + uint64_t ins1_64b:1; + uint64_t ins0_64b:1; + uint64_t pci_wdis:1; + uint64_t wait_com:1; + uint64_t reserved_37_39:3; + uint64_t max_word:5; + uint64_t reserved_10_31:22; + uint64_t timer:10; + } s; + struct cvmx_npi_ctl_status_cn30xx { + uint64_t reserved_63_63:1; + uint64_t chip_rev:8; + uint64_t dis_pniw:1; + uint64_t reserved_51_53:3; + uint64_t out0_enb:1; + uint64_t reserved_47_49:3; + uint64_t ins0_enb:1; + uint64_t reserved_43_45:3; + uint64_t ins0_64b:1; + uint64_t pci_wdis:1; + uint64_t wait_com:1; + uint64_t reserved_37_39:3; + uint64_t max_word:5; + uint64_t reserved_10_31:22; + uint64_t timer:10; + } cn30xx; + struct cvmx_npi_ctl_status_cn31xx { + uint64_t reserved_63_63:1; + uint64_t chip_rev:8; + uint64_t dis_pniw:1; + uint64_t reserved_52_53:2; + uint64_t out1_enb:1; + uint64_t out0_enb:1; + uint64_t reserved_48_49:2; + uint64_t ins1_enb:1; + uint64_t 
ins0_enb:1; + uint64_t reserved_44_45:2; + uint64_t ins1_64b:1; + uint64_t ins0_64b:1; + uint64_t pci_wdis:1; + uint64_t wait_com:1; + uint64_t reserved_37_39:3; + uint64_t max_word:5; + uint64_t reserved_10_31:22; + uint64_t timer:10; + } cn31xx; + struct cvmx_npi_ctl_status_s cn38xx; + struct cvmx_npi_ctl_status_s cn38xxp2; + struct cvmx_npi_ctl_status_cn31xx cn50xx; + struct cvmx_npi_ctl_status_s cn58xx; + struct cvmx_npi_ctl_status_s cn58xxp1; +}; + +union cvmx_npi_dbg_select { + uint64_t u64; + struct cvmx_npi_dbg_select_s { + uint64_t reserved_16_63:48; + uint64_t dbg_sel:16; + } s; + struct cvmx_npi_dbg_select_s cn30xx; + struct cvmx_npi_dbg_select_s cn31xx; + struct cvmx_npi_dbg_select_s cn38xx; + struct cvmx_npi_dbg_select_s cn38xxp2; + struct cvmx_npi_dbg_select_s cn50xx; + struct cvmx_npi_dbg_select_s cn58xx; + struct cvmx_npi_dbg_select_s cn58xxp1; +}; + +union cvmx_npi_dma_control { + uint64_t u64; + struct cvmx_npi_dma_control_s { + uint64_t reserved_36_63:28; + uint64_t b0_lend:1; + uint64_t dwb_denb:1; + uint64_t dwb_ichk:9; + uint64_t fpa_que:3; + uint64_t o_add1:1; + uint64_t o_ro:1; + uint64_t o_ns:1; + uint64_t o_es:2; + uint64_t o_mode:1; + uint64_t hp_enb:1; + uint64_t lp_enb:1; + uint64_t csize:14; + } s; + struct cvmx_npi_dma_control_s cn30xx; + struct cvmx_npi_dma_control_s cn31xx; + struct cvmx_npi_dma_control_s cn38xx; + struct cvmx_npi_dma_control_s cn38xxp2; + struct cvmx_npi_dma_control_s cn50xx; + struct cvmx_npi_dma_control_s cn58xx; + struct cvmx_npi_dma_control_s cn58xxp1; +}; + +union cvmx_npi_dma_highp_counts { + uint64_t u64; + struct cvmx_npi_dma_highp_counts_s { + uint64_t reserved_39_63:25; + uint64_t fcnt:7; + uint64_t dbell:32; + } s; + struct cvmx_npi_dma_highp_counts_s cn30xx; + struct cvmx_npi_dma_highp_counts_s cn31xx; + struct cvmx_npi_dma_highp_counts_s cn38xx; + struct cvmx_npi_dma_highp_counts_s cn38xxp2; + struct cvmx_npi_dma_highp_counts_s cn50xx; + struct cvmx_npi_dma_highp_counts_s cn58xx; + struct cvmx_npi_dma_highp_counts_s cn58xxp1; +}; + +union cvmx_npi_dma_highp_naddr { + uint64_t u64; + struct cvmx_npi_dma_highp_naddr_s { + uint64_t reserved_40_63:24; + uint64_t state:4; + uint64_t addr:36; + } s; + struct cvmx_npi_dma_highp_naddr_s cn30xx; + struct cvmx_npi_dma_highp_naddr_s cn31xx; + struct cvmx_npi_dma_highp_naddr_s cn38xx; + struct cvmx_npi_dma_highp_naddr_s cn38xxp2; + struct cvmx_npi_dma_highp_naddr_s cn50xx; + struct cvmx_npi_dma_highp_naddr_s cn58xx; + struct cvmx_npi_dma_highp_naddr_s cn58xxp1; +}; + +union cvmx_npi_dma_lowp_counts { + uint64_t u64; + struct cvmx_npi_dma_lowp_counts_s { + uint64_t reserved_39_63:25; + uint64_t fcnt:7; + uint64_t dbell:32; + } s; + struct cvmx_npi_dma_lowp_counts_s cn30xx; + struct cvmx_npi_dma_lowp_counts_s cn31xx; + struct cvmx_npi_dma_lowp_counts_s cn38xx; + struct cvmx_npi_dma_lowp_counts_s cn38xxp2; + struct cvmx_npi_dma_lowp_counts_s cn50xx; + struct cvmx_npi_dma_lowp_counts_s cn58xx; + struct cvmx_npi_dma_lowp_counts_s cn58xxp1; +}; + +union cvmx_npi_dma_lowp_naddr { + uint64_t u64; + struct cvmx_npi_dma_lowp_naddr_s { + uint64_t reserved_40_63:24; + uint64_t state:4; + uint64_t addr:36; + } s; + struct cvmx_npi_dma_lowp_naddr_s cn30xx; + struct cvmx_npi_dma_lowp_naddr_s cn31xx; + struct cvmx_npi_dma_lowp_naddr_s cn38xx; + struct cvmx_npi_dma_lowp_naddr_s cn38xxp2; + struct cvmx_npi_dma_lowp_naddr_s cn50xx; + struct cvmx_npi_dma_lowp_naddr_s cn58xx; + struct cvmx_npi_dma_lowp_naddr_s cn58xxp1; +}; + +union cvmx_npi_highp_dbell { + uint64_t u64; + struct cvmx_npi_highp_dbell_s { + 
uint64_t reserved_16_63:48; + uint64_t dbell:16; + } s; + struct cvmx_npi_highp_dbell_s cn30xx; + struct cvmx_npi_highp_dbell_s cn31xx; + struct cvmx_npi_highp_dbell_s cn38xx; + struct cvmx_npi_highp_dbell_s cn38xxp2; + struct cvmx_npi_highp_dbell_s cn50xx; + struct cvmx_npi_highp_dbell_s cn58xx; + struct cvmx_npi_highp_dbell_s cn58xxp1; +}; + +union cvmx_npi_highp_ibuff_saddr { + uint64_t u64; + struct cvmx_npi_highp_ibuff_saddr_s { + uint64_t reserved_36_63:28; + uint64_t saddr:36; + } s; + struct cvmx_npi_highp_ibuff_saddr_s cn30xx; + struct cvmx_npi_highp_ibuff_saddr_s cn31xx; + struct cvmx_npi_highp_ibuff_saddr_s cn38xx; + struct cvmx_npi_highp_ibuff_saddr_s cn38xxp2; + struct cvmx_npi_highp_ibuff_saddr_s cn50xx; + struct cvmx_npi_highp_ibuff_saddr_s cn58xx; + struct cvmx_npi_highp_ibuff_saddr_s cn58xxp1; +}; + +union cvmx_npi_input_control { + uint64_t u64; + struct cvmx_npi_input_control_s { + uint64_t reserved_23_63:41; + uint64_t pkt_rr:1; + uint64_t pbp_dhi:13; + uint64_t d_nsr:1; + uint64_t d_esr:2; + uint64_t d_ror:1; + uint64_t use_csr:1; + uint64_t nsr:1; + uint64_t esr:2; + uint64_t ror:1; + } s; + struct cvmx_npi_input_control_cn30xx { + uint64_t reserved_22_63:42; + uint64_t pbp_dhi:13; + uint64_t d_nsr:1; + uint64_t d_esr:2; + uint64_t d_ror:1; + uint64_t use_csr:1; + uint64_t nsr:1; + uint64_t esr:2; + uint64_t ror:1; + } cn30xx; + struct cvmx_npi_input_control_cn30xx cn31xx; + struct cvmx_npi_input_control_s cn38xx; + struct cvmx_npi_input_control_cn30xx cn38xxp2; + struct cvmx_npi_input_control_s cn50xx; + struct cvmx_npi_input_control_s cn58xx; + struct cvmx_npi_input_control_s cn58xxp1; +}; + +union cvmx_npi_int_enb { + uint64_t u64; + struct cvmx_npi_int_enb_s { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npi_int_enb_cn30xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + 
uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_36_38:3; + uint64_t i0_pperr:1; + uint64_t reserved_32_34:3; + uint64_t p0_ptout:1; + uint64_t reserved_28_30:3; + uint64_t p0_pperr:1; + uint64_t reserved_24_26:3; + uint64_t g0_rtout:1; + uint64_t reserved_20_22:3; + uint64_t p0_perr:1; + uint64_t reserved_16_18:3; + uint64_t p0_rtout:1; + uint64_t reserved_12_14:3; + uint64_t i0_overf:1; + uint64_t reserved_8_10:3; + uint64_t i0_rtout:1; + uint64_t reserved_4_6:3; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn30xx; + struct cvmx_npi_int_enb_cn31xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_37_38:2; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t reserved_33_34:2; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t reserved_29_30:2; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t reserved_25_26:2; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t reserved_21_22:2; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t reserved_17_18:2; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t reserved_13_14:2; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t reserved_9_10:2; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t reserved_5_6:2; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn31xx; + struct cvmx_npi_int_enb_s cn38xx; + struct cvmx_npi_int_enb_cn38xxp2 { + uint64_t reserved_42_63:22; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn38xxp2; + struct cvmx_npi_int_enb_cn31xx cn50xx; + struct cvmx_npi_int_enb_s cn58xx; + struct cvmx_npi_int_enb_s cn58xxp1; +}; + +union cvmx_npi_int_sum { + uint64_t u64; + struct cvmx_npi_int_sum_s { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t 
com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } s; + struct cvmx_npi_int_sum_cn30xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_36_38:3; + uint64_t i0_pperr:1; + uint64_t reserved_32_34:3; + uint64_t p0_ptout:1; + uint64_t reserved_28_30:3; + uint64_t p0_pperr:1; + uint64_t reserved_24_26:3; + uint64_t g0_rtout:1; + uint64_t reserved_20_22:3; + uint64_t p0_perr:1; + uint64_t reserved_16_18:3; + uint64_t p0_rtout:1; + uint64_t reserved_12_14:3; + uint64_t i0_overf:1; + uint64_t reserved_8_10:3; + uint64_t i0_rtout:1; + uint64_t reserved_4_6:3; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn30xx; + struct cvmx_npi_int_sum_cn31xx { + uint64_t reserved_62_63:2; + uint64_t q1_a_f:1; + uint64_t q1_s_e:1; + uint64_t pdf_p_f:1; + uint64_t pdf_p_e:1; + uint64_t pcf_p_f:1; + uint64_t pcf_p_e:1; + uint64_t rdx_s_e:1; + uint64_t rwx_s_e:1; + uint64_t pnc_a_f:1; + uint64_t pnc_s_e:1; + uint64_t com_a_f:1; + uint64_t com_s_e:1; + uint64_t q3_a_f:1; + uint64_t q3_s_e:1; + uint64_t q2_a_f:1; + uint64_t q2_s_e:1; + uint64_t pcr_a_f:1; + uint64_t pcr_s_e:1; + uint64_t fcr_a_f:1; + uint64_t fcr_s_e:1; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t reserved_37_38:2; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t reserved_33_34:2; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t reserved_29_30:2; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t reserved_25_26:2; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t reserved_21_22:2; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t reserved_17_18:2; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t reserved_13_14:2; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t reserved_9_10:2; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t reserved_5_6:2; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } 
cn31xx; + struct cvmx_npi_int_sum_s cn38xx; + struct cvmx_npi_int_sum_cn38xxp2 { + uint64_t reserved_42_63:22; + uint64_t iobdma:1; + uint64_t p_dperr:1; + uint64_t win_rto:1; + uint64_t i3_pperr:1; + uint64_t i2_pperr:1; + uint64_t i1_pperr:1; + uint64_t i0_pperr:1; + uint64_t p3_ptout:1; + uint64_t p2_ptout:1; + uint64_t p1_ptout:1; + uint64_t p0_ptout:1; + uint64_t p3_pperr:1; + uint64_t p2_pperr:1; + uint64_t p1_pperr:1; + uint64_t p0_pperr:1; + uint64_t g3_rtout:1; + uint64_t g2_rtout:1; + uint64_t g1_rtout:1; + uint64_t g0_rtout:1; + uint64_t p3_perr:1; + uint64_t p2_perr:1; + uint64_t p1_perr:1; + uint64_t p0_perr:1; + uint64_t p3_rtout:1; + uint64_t p2_rtout:1; + uint64_t p1_rtout:1; + uint64_t p0_rtout:1; + uint64_t i3_overf:1; + uint64_t i2_overf:1; + uint64_t i1_overf:1; + uint64_t i0_overf:1; + uint64_t i3_rtout:1; + uint64_t i2_rtout:1; + uint64_t i1_rtout:1; + uint64_t i0_rtout:1; + uint64_t po3_2sml:1; + uint64_t po2_2sml:1; + uint64_t po1_2sml:1; + uint64_t po0_2sml:1; + uint64_t pci_rsl:1; + uint64_t rml_wto:1; + uint64_t rml_rto:1; + } cn38xxp2; + struct cvmx_npi_int_sum_cn31xx cn50xx; + struct cvmx_npi_int_sum_s cn58xx; + struct cvmx_npi_int_sum_s cn58xxp1; +}; + +union cvmx_npi_lowp_dbell { + uint64_t u64; + struct cvmx_npi_lowp_dbell_s { + uint64_t reserved_16_63:48; + uint64_t dbell:16; + } s; + struct cvmx_npi_lowp_dbell_s cn30xx; + struct cvmx_npi_lowp_dbell_s cn31xx; + struct cvmx_npi_lowp_dbell_s cn38xx; + struct cvmx_npi_lowp_dbell_s cn38xxp2; + struct cvmx_npi_lowp_dbell_s cn50xx; + struct cvmx_npi_lowp_dbell_s cn58xx; + struct cvmx_npi_lowp_dbell_s cn58xxp1; +}; + +union cvmx_npi_lowp_ibuff_saddr { + uint64_t u64; + struct cvmx_npi_lowp_ibuff_saddr_s { + uint64_t reserved_36_63:28; + uint64_t saddr:36; + } s; + struct cvmx_npi_lowp_ibuff_saddr_s cn30xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn31xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn38xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn38xxp2; + struct cvmx_npi_lowp_ibuff_saddr_s cn50xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn58xx; + struct cvmx_npi_lowp_ibuff_saddr_s cn58xxp1; +}; + +union cvmx_npi_mem_access_subidx { + uint64_t u64; + struct cvmx_npi_mem_access_subidx_s { + uint64_t reserved_38_63:26; + uint64_t shortl:1; + uint64_t nmerge:1; + uint64_t esr:2; + uint64_t esw:2; + uint64_t nsr:1; + uint64_t nsw:1; + uint64_t ror:1; + uint64_t row:1; + uint64_t ba:28; + } s; + struct cvmx_npi_mem_access_subidx_s cn30xx; + struct cvmx_npi_mem_access_subidx_cn31xx { + uint64_t reserved_36_63:28; + uint64_t esr:2; + uint64_t esw:2; + uint64_t nsr:1; + uint64_t nsw:1; + uint64_t ror:1; + uint64_t row:1; + uint64_t ba:28; + } cn31xx; + struct cvmx_npi_mem_access_subidx_s cn38xx; + struct cvmx_npi_mem_access_subidx_cn31xx cn38xxp2; + struct cvmx_npi_mem_access_subidx_s cn50xx; + struct cvmx_npi_mem_access_subidx_s cn58xx; + struct cvmx_npi_mem_access_subidx_s cn58xxp1; +}; + +union cvmx_npi_msi_rcv { + uint64_t u64; + struct cvmx_npi_msi_rcv_s { + uint64_t int_vec:64; + } s; + struct cvmx_npi_msi_rcv_s cn30xx; + struct cvmx_npi_msi_rcv_s cn31xx; + struct cvmx_npi_msi_rcv_s cn38xx; + struct cvmx_npi_msi_rcv_s cn38xxp2; + struct cvmx_npi_msi_rcv_s cn50xx; + struct cvmx_npi_msi_rcv_s cn58xx; + struct cvmx_npi_msi_rcv_s cn58xxp1; +}; + +union cvmx_npi_num_desc_outputx { + uint64_t u64; + struct cvmx_npi_num_desc_outputx_s { + uint64_t reserved_32_63:32; + uint64_t size:32; + } s; + struct cvmx_npi_num_desc_outputx_s cn30xx; + struct cvmx_npi_num_desc_outputx_s cn31xx; + struct cvmx_npi_num_desc_outputx_s cn38xx; + struct 
cvmx_npi_num_desc_outputx_s cn38xxp2; + struct cvmx_npi_num_desc_outputx_s cn50xx; + struct cvmx_npi_num_desc_outputx_s cn58xx; + struct cvmx_npi_num_desc_outputx_s cn58xxp1; +}; + +union cvmx_npi_output_control { + uint64_t u64; + struct cvmx_npi_output_control_s { + uint64_t reserved_49_63:15; + uint64_t pkt_rr:1; + uint64_t p3_bmode:1; + uint64_t p2_bmode:1; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t o3_es:2; + uint64_t o3_ns:1; + uint64_t o3_ro:1; + uint64_t o2_es:2; + uint64_t o2_ns:1; + uint64_t o2_ro:1; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t o3_csrm:1; + uint64_t o2_csrm:1; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_20_23:4; + uint64_t iptr_o3:1; + uint64_t iptr_o2:1; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t esr_sl3:2; + uint64_t nsr_sl3:1; + uint64_t ror_sl3:1; + uint64_t esr_sl2:2; + uint64_t nsr_sl2:1; + uint64_t ror_sl2:1; + uint64_t esr_sl1:2; + uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } s; + struct cvmx_npi_output_control_cn30xx { + uint64_t reserved_45_63:19; + uint64_t p0_bmode:1; + uint64_t reserved_32_43:12; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t reserved_25_27:3; + uint64_t o0_csrm:1; + uint64_t reserved_17_23:7; + uint64_t iptr_o0:1; + uint64_t reserved_4_15:12; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn30xx; + struct cvmx_npi_output_control_cn31xx { + uint64_t reserved_46_63:18; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t reserved_36_43:8; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t reserved_26_27:2; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_18_23:6; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t reserved_8_15:8; + uint64_t esr_sl1:2; + uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn31xx; + struct cvmx_npi_output_control_s cn38xx; + struct cvmx_npi_output_control_cn38xxp2 { + uint64_t reserved_48_63:16; + uint64_t p3_bmode:1; + uint64_t p2_bmode:1; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t o3_es:2; + uint64_t o3_ns:1; + uint64_t o3_ro:1; + uint64_t o2_es:2; + uint64_t o2_ns:1; + uint64_t o2_ro:1; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t o3_csrm:1; + uint64_t o2_csrm:1; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_20_23:4; + uint64_t iptr_o3:1; + uint64_t iptr_o2:1; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t esr_sl3:2; + uint64_t nsr_sl3:1; + uint64_t ror_sl3:1; + uint64_t esr_sl2:2; + uint64_t nsr_sl2:1; + uint64_t ror_sl2:1; + uint64_t esr_sl1:2; + uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn38xxp2; + struct cvmx_npi_output_control_cn50xx { + uint64_t reserved_49_63:15; + uint64_t pkt_rr:1; + uint64_t reserved_46_47:2; + uint64_t p1_bmode:1; + uint64_t p0_bmode:1; + uint64_t reserved_36_43:8; + uint64_t o1_es:2; + uint64_t o1_ns:1; + uint64_t o1_ro:1; + uint64_t o0_es:2; + uint64_t o0_ns:1; + uint64_t o0_ro:1; + uint64_t reserved_26_27:2; + uint64_t o1_csrm:1; + uint64_t o0_csrm:1; + uint64_t reserved_18_23:6; + uint64_t iptr_o1:1; + uint64_t iptr_o0:1; + uint64_t reserved_8_15:8; + uint64_t esr_sl1:2; + 
uint64_t nsr_sl1:1; + uint64_t ror_sl1:1; + uint64_t esr_sl0:2; + uint64_t nsr_sl0:1; + uint64_t ror_sl0:1; + } cn50xx; + struct cvmx_npi_output_control_s cn58xx; + struct cvmx_npi_output_control_s cn58xxp1; +}; + +union cvmx_npi_px_dbpair_addr { + uint64_t u64; + struct cvmx_npi_px_dbpair_addr_s { + uint64_t reserved_63_63:1; + uint64_t state:2; + uint64_t naddr:61; + } s; + struct cvmx_npi_px_dbpair_addr_s cn30xx; + struct cvmx_npi_px_dbpair_addr_s cn31xx; + struct cvmx_npi_px_dbpair_addr_s cn38xx; + struct cvmx_npi_px_dbpair_addr_s cn38xxp2; + struct cvmx_npi_px_dbpair_addr_s cn50xx; + struct cvmx_npi_px_dbpair_addr_s cn58xx; + struct cvmx_npi_px_dbpair_addr_s cn58xxp1; +}; + +union cvmx_npi_px_instr_addr { + uint64_t u64; + struct cvmx_npi_px_instr_addr_s { + uint64_t state:3; + uint64_t naddr:61; + } s; + struct cvmx_npi_px_instr_addr_s cn30xx; + struct cvmx_npi_px_instr_addr_s cn31xx; + struct cvmx_npi_px_instr_addr_s cn38xx; + struct cvmx_npi_px_instr_addr_s cn38xxp2; + struct cvmx_npi_px_instr_addr_s cn50xx; + struct cvmx_npi_px_instr_addr_s cn58xx; + struct cvmx_npi_px_instr_addr_s cn58xxp1; +}; + +union cvmx_npi_px_instr_cnts { + uint64_t u64; + struct cvmx_npi_px_instr_cnts_s { + uint64_t reserved_38_63:26; + uint64_t fcnt:6; + uint64_t avail:32; + } s; + struct cvmx_npi_px_instr_cnts_s cn30xx; + struct cvmx_npi_px_instr_cnts_s cn31xx; + struct cvmx_npi_px_instr_cnts_s cn38xx; + struct cvmx_npi_px_instr_cnts_s cn38xxp2; + struct cvmx_npi_px_instr_cnts_s cn50xx; + struct cvmx_npi_px_instr_cnts_s cn58xx; + struct cvmx_npi_px_instr_cnts_s cn58xxp1; +}; + +union cvmx_npi_px_pair_cnts { + uint64_t u64; + struct cvmx_npi_px_pair_cnts_s { + uint64_t reserved_37_63:27; + uint64_t fcnt:5; + uint64_t avail:32; + } s; + struct cvmx_npi_px_pair_cnts_s cn30xx; + struct cvmx_npi_px_pair_cnts_s cn31xx; + struct cvmx_npi_px_pair_cnts_s cn38xx; + struct cvmx_npi_px_pair_cnts_s cn38xxp2; + struct cvmx_npi_px_pair_cnts_s cn50xx; + struct cvmx_npi_px_pair_cnts_s cn58xx; + struct cvmx_npi_px_pair_cnts_s cn58xxp1; +}; + +union cvmx_npi_pci_burst_size { + uint64_t u64; + struct cvmx_npi_pci_burst_size_s { + uint64_t reserved_14_63:50; + uint64_t wr_brst:7; + uint64_t rd_brst:7; + } s; + struct cvmx_npi_pci_burst_size_s cn30xx; + struct cvmx_npi_pci_burst_size_s cn31xx; + struct cvmx_npi_pci_burst_size_s cn38xx; + struct cvmx_npi_pci_burst_size_s cn38xxp2; + struct cvmx_npi_pci_burst_size_s cn50xx; + struct cvmx_npi_pci_burst_size_s cn58xx; + struct cvmx_npi_pci_burst_size_s cn58xxp1; +}; + +union cvmx_npi_pci_int_arb_cfg { + uint64_t u64; + struct cvmx_npi_pci_int_arb_cfg_s { + uint64_t reserved_13_63:51; + uint64_t hostmode:1; + uint64_t pci_ovr:4; + uint64_t reserved_5_7:3; + uint64_t en:1; + uint64_t park_mod:1; + uint64_t park_dev:3; + } s; + struct cvmx_npi_pci_int_arb_cfg_cn30xx { + uint64_t reserved_5_63:59; + uint64_t en:1; + uint64_t park_mod:1; + uint64_t park_dev:3; + } cn30xx; + struct cvmx_npi_pci_int_arb_cfg_cn30xx cn31xx; + struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xx; + struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xxp2; + struct cvmx_npi_pci_int_arb_cfg_s cn50xx; + struct cvmx_npi_pci_int_arb_cfg_s cn58xx; + struct cvmx_npi_pci_int_arb_cfg_s cn58xxp1; +}; + +union cvmx_npi_pci_read_cmd { + uint64_t u64; + struct cvmx_npi_pci_read_cmd_s { + uint64_t reserved_11_63:53; + uint64_t cmd_size:11; + } s; + struct cvmx_npi_pci_read_cmd_s cn30xx; + struct cvmx_npi_pci_read_cmd_s cn31xx; + struct cvmx_npi_pci_read_cmd_s cn38xx; + struct cvmx_npi_pci_read_cmd_s cn38xxp2; + struct 
cvmx_npi_pci_read_cmd_s cn50xx; + struct cvmx_npi_pci_read_cmd_s cn58xx; + struct cvmx_npi_pci_read_cmd_s cn58xxp1; +}; + +union cvmx_npi_port32_instr_hdr { + uint64_t u64; + struct cvmx_npi_port32_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port32_instr_hdr_s cn30xx; + struct cvmx_npi_port32_instr_hdr_s cn31xx; + struct cvmx_npi_port32_instr_hdr_s cn38xx; + struct cvmx_npi_port32_instr_hdr_s cn38xxp2; + struct cvmx_npi_port32_instr_hdr_s cn50xx; + struct cvmx_npi_port32_instr_hdr_s cn58xx; + struct cvmx_npi_port32_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port33_instr_hdr { + uint64_t u64; + struct cvmx_npi_port33_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port33_instr_hdr_s cn31xx; + struct cvmx_npi_port33_instr_hdr_s cn38xx; + struct cvmx_npi_port33_instr_hdr_s cn38xxp2; + struct cvmx_npi_port33_instr_hdr_s cn50xx; + struct cvmx_npi_port33_instr_hdr_s cn58xx; + struct cvmx_npi_port33_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port34_instr_hdr { + uint64_t u64; + struct cvmx_npi_port34_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port34_instr_hdr_s cn38xx; + struct cvmx_npi_port34_instr_hdr_s cn38xxp2; + struct cvmx_npi_port34_instr_hdr_s cn58xx; + struct cvmx_npi_port34_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port35_instr_hdr { + uint64_t u64; + struct cvmx_npi_port35_instr_hdr_s { + uint64_t reserved_44_63:20; + uint64_t pbp:1; + uint64_t rsv_f:5; + uint64_t rparmode:2; + uint64_t rsv_e:1; + uint64_t rskp_len:7; + uint64_t rsv_d:6; + uint64_t use_ihdr:1; + uint64_t rsv_c:5; + uint64_t par_mode:2; + uint64_t rsv_b:1; + uint64_t skp_len:7; + uint64_t rsv_a:6; + } s; + struct cvmx_npi_port35_instr_hdr_s cn38xx; + struct cvmx_npi_port35_instr_hdr_s cn38xxp2; + struct cvmx_npi_port35_instr_hdr_s cn58xx; + struct cvmx_npi_port35_instr_hdr_s cn58xxp1; +}; + +union cvmx_npi_port_bp_control { + uint64_t u64; + struct cvmx_npi_port_bp_control_s { + uint64_t reserved_8_63:56; + uint64_t bp_on:4; + uint64_t enb:4; + } s; + struct cvmx_npi_port_bp_control_s cn30xx; + struct cvmx_npi_port_bp_control_s cn31xx; + struct cvmx_npi_port_bp_control_s cn38xx; + struct cvmx_npi_port_bp_control_s cn38xxp2; + struct cvmx_npi_port_bp_control_s cn50xx; + struct cvmx_npi_port_bp_control_s cn58xx; + struct cvmx_npi_port_bp_control_s cn58xxp1; +}; + +union cvmx_npi_rsl_int_blocks { + uint64_t u64; + struct cvmx_npi_rsl_int_blocks_s { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t reserved_28_29:2; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + 
uint64_t reserved_13_14:2; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } s; + struct cvmx_npi_rsl_int_blocks_cn30xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn30xx; + struct cvmx_npi_rsl_int_blocks_cn30xx cn31xx; + struct cvmx_npi_rsl_int_blocks_cn38xx { + uint64_t reserved_32_63:32; + uint64_t rint_31:1; + uint64_t iob:1; + uint64_t rint_29:1; + uint64_t rint_28:1; + uint64_t rint_27:1; + uint64_t rint_26:1; + uint64_t rint_25:1; + uint64_t rint_24:1; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t rint_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t rint_15:1; + uint64_t rint_14:1; + uint64_t rint_13:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t rint_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn38xx; + struct cvmx_npi_rsl_int_blocks_cn38xx cn38xxp2; + struct cvmx_npi_rsl_int_blocks_cn50xx { + uint64_t reserved_31_63:33; + uint64_t iob:1; + uint64_t lmc1:1; + uint64_t agl:1; + uint64_t reserved_24_27:4; + uint64_t asx1:1; + uint64_t asx0:1; + uint64_t reserved_21_21:1; + uint64_t pip:1; + uint64_t spx1:1; + uint64_t spx0:1; + uint64_t lmc:1; + uint64_t l2c:1; + uint64_t reserved_15_15:1; + uint64_t rad:1; + uint64_t usb:1; + uint64_t pow:1; + uint64_t tim:1; + uint64_t pko:1; + uint64_t ipd:1; + uint64_t reserved_8_8:1; + uint64_t zip:1; + uint64_t dfa:1; + uint64_t fpa:1; + uint64_t key:1; + uint64_t npi:1; + uint64_t gmx1:1; + uint64_t gmx0:1; + uint64_t mio:1; + } cn50xx; + struct cvmx_npi_rsl_int_blocks_cn38xx cn58xx; + struct cvmx_npi_rsl_int_blocks_cn38xx cn58xxp1; +}; + +union cvmx_npi_size_inputx { + uint64_t u64; + struct cvmx_npi_size_inputx_s { + uint64_t reserved_32_63:32; + uint64_t size:32; + } s; + struct cvmx_npi_size_inputx_s cn30xx; + struct cvmx_npi_size_inputx_s cn31xx; + struct cvmx_npi_size_inputx_s cn38xx; + struct cvmx_npi_size_inputx_s cn38xxp2; + struct cvmx_npi_size_inputx_s cn50xx; + struct cvmx_npi_size_inputx_s cn58xx; + struct cvmx_npi_size_inputx_s cn58xxp1; +}; + +union cvmx_npi_win_read_to { + uint64_t u64; + struct cvmx_npi_win_read_to_s { + uint64_t reserved_32_63:32; + uint64_t time:32; + } s; + struct cvmx_npi_win_read_to_s cn30xx; + struct cvmx_npi_win_read_to_s cn31xx; + struct cvmx_npi_win_read_to_s cn38xx; + struct cvmx_npi_win_read_to_s cn38xxp2; + struct cvmx_npi_win_read_to_s cn50xx; + struct cvmx_npi_win_read_to_s cn58xx; + struct cvmx_npi_win_read_to_s cn58xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h new file mode 100644 index 00000000000..90f8d653575 
--- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h @@ -0,0 +1,1645 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCI_DEFS_H__ +#define __CVMX_PCI_DEFS_H__ + +#define CVMX_PCI_BAR1_INDEXX(offset) \ + (0x0000000000000100ull + (((offset) & 31) * 4)) +#define CVMX_PCI_BIST_REG \ + (0x00000000000001C0ull) +#define CVMX_PCI_CFG00 \ + (0x0000000000000000ull) +#define CVMX_PCI_CFG01 \ + (0x0000000000000004ull) +#define CVMX_PCI_CFG02 \ + (0x0000000000000008ull) +#define CVMX_PCI_CFG03 \ + (0x000000000000000Cull) +#define CVMX_PCI_CFG04 \ + (0x0000000000000010ull) +#define CVMX_PCI_CFG05 \ + (0x0000000000000014ull) +#define CVMX_PCI_CFG06 \ + (0x0000000000000018ull) +#define CVMX_PCI_CFG07 \ + (0x000000000000001Cull) +#define CVMX_PCI_CFG08 \ + (0x0000000000000020ull) +#define CVMX_PCI_CFG09 \ + (0x0000000000000024ull) +#define CVMX_PCI_CFG10 \ + (0x0000000000000028ull) +#define CVMX_PCI_CFG11 \ + (0x000000000000002Cull) +#define CVMX_PCI_CFG12 \ + (0x0000000000000030ull) +#define CVMX_PCI_CFG13 \ + (0x0000000000000034ull) +#define CVMX_PCI_CFG15 \ + (0x000000000000003Cull) +#define CVMX_PCI_CFG16 \ + (0x0000000000000040ull) +#define CVMX_PCI_CFG17 \ + (0x0000000000000044ull) +#define CVMX_PCI_CFG18 \ + (0x0000000000000048ull) +#define CVMX_PCI_CFG19 \ + (0x000000000000004Cull) +#define CVMX_PCI_CFG20 \ + (0x0000000000000050ull) +#define CVMX_PCI_CFG21 \ + (0x0000000000000054ull) +#define CVMX_PCI_CFG22 \ + (0x0000000000000058ull) +#define CVMX_PCI_CFG56 \ + (0x00000000000000E0ull) +#define CVMX_PCI_CFG57 \ + (0x00000000000000E4ull) +#define CVMX_PCI_CFG58 \ + (0x00000000000000E8ull) +#define CVMX_PCI_CFG59 \ + (0x00000000000000ECull) +#define CVMX_PCI_CFG60 \ + (0x00000000000000F0ull) +#define CVMX_PCI_CFG61 \ + (0x00000000000000F4ull) +#define CVMX_PCI_CFG62 \ + (0x00000000000000F8ull) +#define CVMX_PCI_CFG63 \ + (0x00000000000000FCull) +#define CVMX_PCI_CNT_REG \ + (0x00000000000001B8ull) +#define CVMX_PCI_CTL_STATUS_2 \ + (0x000000000000018Cull) +#define CVMX_PCI_DBELL_0 \ + (0x0000000000000080ull) +#define CVMX_PCI_DBELL_1 \ + (0x0000000000000088ull) +#define CVMX_PCI_DBELL_2 \ + (0x0000000000000090ull) +#define CVMX_PCI_DBELL_3 \ + (0x0000000000000098ull) +#define CVMX_PCI_DBELL_X(offset) \ + (0x0000000000000080ull + (((offset) & 3) * 8)) +#define CVMX_PCI_DMA_CNT0 \ + (0x00000000000000A0ull) +#define CVMX_PCI_DMA_CNT1 \ + (0x00000000000000A8ull) +#define 
CVMX_PCI_DMA_CNTX(offset) \ + (0x00000000000000A0ull + (((offset) & 1) * 8)) +#define CVMX_PCI_DMA_INT_LEV0 \ + (0x00000000000000A4ull) +#define CVMX_PCI_DMA_INT_LEV1 \ + (0x00000000000000ACull) +#define CVMX_PCI_DMA_INT_LEVX(offset) \ + (0x00000000000000A4ull + (((offset) & 1) * 8)) +#define CVMX_PCI_DMA_TIME0 \ + (0x00000000000000B0ull) +#define CVMX_PCI_DMA_TIME1 \ + (0x00000000000000B4ull) +#define CVMX_PCI_DMA_TIMEX(offset) \ + (0x00000000000000B0ull + (((offset) & 1) * 4)) +#define CVMX_PCI_INSTR_COUNT0 \ + (0x0000000000000084ull) +#define CVMX_PCI_INSTR_COUNT1 \ + (0x000000000000008Cull) +#define CVMX_PCI_INSTR_COUNT2 \ + (0x0000000000000094ull) +#define CVMX_PCI_INSTR_COUNT3 \ + (0x000000000000009Cull) +#define CVMX_PCI_INSTR_COUNTX(offset) \ + (0x0000000000000084ull + (((offset) & 3) * 8)) +#define CVMX_PCI_INT_ENB \ + (0x0000000000000038ull) +#define CVMX_PCI_INT_ENB2 \ + (0x00000000000001A0ull) +#define CVMX_PCI_INT_SUM \ + (0x0000000000000030ull) +#define CVMX_PCI_INT_SUM2 \ + (0x0000000000000198ull) +#define CVMX_PCI_MSI_RCV \ + (0x00000000000000F0ull) +#define CVMX_PCI_PKTS_SENT0 \ + (0x0000000000000040ull) +#define CVMX_PCI_PKTS_SENT1 \ + (0x0000000000000050ull) +#define CVMX_PCI_PKTS_SENT2 \ + (0x0000000000000060ull) +#define CVMX_PCI_PKTS_SENT3 \ + (0x0000000000000070ull) +#define CVMX_PCI_PKTS_SENTX(offset) \ + (0x0000000000000040ull + (((offset) & 3) * 16)) +#define CVMX_PCI_PKTS_SENT_INT_LEV0 \ + (0x0000000000000048ull) +#define CVMX_PCI_PKTS_SENT_INT_LEV1 \ + (0x0000000000000058ull) +#define CVMX_PCI_PKTS_SENT_INT_LEV2 \ + (0x0000000000000068ull) +#define CVMX_PCI_PKTS_SENT_INT_LEV3 \ + (0x0000000000000078ull) +#define CVMX_PCI_PKTS_SENT_INT_LEVX(offset) \ + (0x0000000000000048ull + (((offset) & 3) * 16)) +#define CVMX_PCI_PKTS_SENT_TIME0 \ + (0x000000000000004Cull) +#define CVMX_PCI_PKTS_SENT_TIME1 \ + (0x000000000000005Cull) +#define CVMX_PCI_PKTS_SENT_TIME2 \ + (0x000000000000006Cull) +#define CVMX_PCI_PKTS_SENT_TIME3 \ + (0x000000000000007Cull) +#define CVMX_PCI_PKTS_SENT_TIMEX(offset) \ + (0x000000000000004Cull + (((offset) & 3) * 16)) +#define CVMX_PCI_PKT_CREDITS0 \ + (0x0000000000000044ull) +#define CVMX_PCI_PKT_CREDITS1 \ + (0x0000000000000054ull) +#define CVMX_PCI_PKT_CREDITS2 \ + (0x0000000000000064ull) +#define CVMX_PCI_PKT_CREDITS3 \ + (0x0000000000000074ull) +#define CVMX_PCI_PKT_CREDITSX(offset) \ + (0x0000000000000044ull + (((offset) & 3) * 16)) +#define CVMX_PCI_READ_CMD_6 \ + (0x0000000000000180ull) +#define CVMX_PCI_READ_CMD_C \ + (0x0000000000000184ull) +#define CVMX_PCI_READ_CMD_E \ + (0x0000000000000188ull) +#define CVMX_PCI_READ_TIMEOUT \ + CVMX_ADD_IO_SEG(0x00011F00000000B0ull) +#define CVMX_PCI_SCM_REG \ + (0x00000000000001A8ull) +#define CVMX_PCI_TSR_REG \ + (0x00000000000001B0ull) +#define CVMX_PCI_WIN_RD_ADDR \ + (0x0000000000000008ull) +#define CVMX_PCI_WIN_RD_DATA \ + (0x0000000000000020ull) +#define CVMX_PCI_WIN_WR_ADDR \ + (0x0000000000000000ull) +#define CVMX_PCI_WIN_WR_DATA \ + (0x0000000000000010ull) +#define CVMX_PCI_WIN_WR_MASK \ + (0x0000000000000018ull) + +union cvmx_pci_bar1_indexx { + uint32_t u32; + struct cvmx_pci_bar1_indexx_s { + uint32_t reserved_18_31:14; + uint32_t addr_idx:14; + uint32_t ca:1; + uint32_t end_swp:2; + uint32_t addr_v:1; + } s; + struct cvmx_pci_bar1_indexx_s cn30xx; + struct cvmx_pci_bar1_indexx_s cn31xx; + struct cvmx_pci_bar1_indexx_s cn38xx; + struct cvmx_pci_bar1_indexx_s cn38xxp2; + struct cvmx_pci_bar1_indexx_s cn50xx; + struct cvmx_pci_bar1_indexx_s cn58xx; + struct cvmx_pci_bar1_indexx_s cn58xxp1; 
+}; + +union cvmx_pci_bist_reg { + uint64_t u64; + struct cvmx_pci_bist_reg_s { + uint64_t reserved_10_63:54; + uint64_t rsp_bs:1; + uint64_t dma0_bs:1; + uint64_t cmd0_bs:1; + uint64_t cmd_bs:1; + uint64_t csr2p_bs:1; + uint64_t csrr_bs:1; + uint64_t rsp2p_bs:1; + uint64_t csr2n_bs:1; + uint64_t dat2n_bs:1; + uint64_t dbg2n_bs:1; + } s; + struct cvmx_pci_bist_reg_s cn50xx; +}; + +union cvmx_pci_cfg00 { + uint32_t u32; + struct cvmx_pci_cfg00_s { + uint32_t devid:16; + uint32_t vendid:16; + } s; + struct cvmx_pci_cfg00_s cn30xx; + struct cvmx_pci_cfg00_s cn31xx; + struct cvmx_pci_cfg00_s cn38xx; + struct cvmx_pci_cfg00_s cn38xxp2; + struct cvmx_pci_cfg00_s cn50xx; + struct cvmx_pci_cfg00_s cn58xx; + struct cvmx_pci_cfg00_s cn58xxp1; +}; + +union cvmx_pci_cfg01 { + uint32_t u32; + struct cvmx_pci_cfg01_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t cle:1; + uint32_t i_stat:1; + uint32_t reserved_11_18:8; + uint32_t i_dis:1; + uint32_t fbbe:1; + uint32_t see:1; + uint32_t ads:1; + uint32_t pee:1; + uint32_t vps:1; + uint32_t mwice:1; + uint32_t scse:1; + uint32_t me:1; + uint32_t msae:1; + uint32_t isae:1; + } s; + struct cvmx_pci_cfg01_s cn30xx; + struct cvmx_pci_cfg01_s cn31xx; + struct cvmx_pci_cfg01_s cn38xx; + struct cvmx_pci_cfg01_s cn38xxp2; + struct cvmx_pci_cfg01_s cn50xx; + struct cvmx_pci_cfg01_s cn58xx; + struct cvmx_pci_cfg01_s cn58xxp1; +}; + +union cvmx_pci_cfg02 { + uint32_t u32; + struct cvmx_pci_cfg02_s { + uint32_t cc:24; + uint32_t rid:8; + } s; + struct cvmx_pci_cfg02_s cn30xx; + struct cvmx_pci_cfg02_s cn31xx; + struct cvmx_pci_cfg02_s cn38xx; + struct cvmx_pci_cfg02_s cn38xxp2; + struct cvmx_pci_cfg02_s cn50xx; + struct cvmx_pci_cfg02_s cn58xx; + struct cvmx_pci_cfg02_s cn58xxp1; +}; + +union cvmx_pci_cfg03 { + uint32_t u32; + struct cvmx_pci_cfg03_s { + uint32_t bcap:1; + uint32_t brb:1; + uint32_t reserved_28_29:2; + uint32_t bcod:4; + uint32_t ht:8; + uint32_t lt:8; + uint32_t cls:8; + } s; + struct cvmx_pci_cfg03_s cn30xx; + struct cvmx_pci_cfg03_s cn31xx; + struct cvmx_pci_cfg03_s cn38xx; + struct cvmx_pci_cfg03_s cn38xxp2; + struct cvmx_pci_cfg03_s cn50xx; + struct cvmx_pci_cfg03_s cn58xx; + struct cvmx_pci_cfg03_s cn58xxp1; +}; + +union cvmx_pci_cfg04 { + uint32_t u32; + struct cvmx_pci_cfg04_s { + uint32_t lbase:20; + uint32_t lbasez:8; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pci_cfg04_s cn30xx; + struct cvmx_pci_cfg04_s cn31xx; + struct cvmx_pci_cfg04_s cn38xx; + struct cvmx_pci_cfg04_s cn38xxp2; + struct cvmx_pci_cfg04_s cn50xx; + struct cvmx_pci_cfg04_s cn58xx; + struct cvmx_pci_cfg04_s cn58xxp1; +}; + +union cvmx_pci_cfg05 { + uint32_t u32; + struct cvmx_pci_cfg05_s { + uint32_t hbase:32; + } s; + struct cvmx_pci_cfg05_s cn30xx; + struct cvmx_pci_cfg05_s cn31xx; + struct cvmx_pci_cfg05_s cn38xx; + struct cvmx_pci_cfg05_s cn38xxp2; + struct cvmx_pci_cfg05_s cn50xx; + struct cvmx_pci_cfg05_s cn58xx; + struct cvmx_pci_cfg05_s cn58xxp1; +}; + +union cvmx_pci_cfg06 { + uint32_t u32; + struct cvmx_pci_cfg06_s { + uint32_t lbase:5; + uint32_t lbasez:23; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pci_cfg06_s cn30xx; + struct cvmx_pci_cfg06_s cn31xx; + struct cvmx_pci_cfg06_s cn38xx; + struct cvmx_pci_cfg06_s cn38xxp2; + struct cvmx_pci_cfg06_s cn50xx; + struct cvmx_pci_cfg06_s cn58xx; + struct cvmx_pci_cfg06_s cn58xxp1; +}; + +union 
cvmx_pci_cfg07 { + uint32_t u32; + struct cvmx_pci_cfg07_s { + uint32_t hbase:32; + } s; + struct cvmx_pci_cfg07_s cn30xx; + struct cvmx_pci_cfg07_s cn31xx; + struct cvmx_pci_cfg07_s cn38xx; + struct cvmx_pci_cfg07_s cn38xxp2; + struct cvmx_pci_cfg07_s cn50xx; + struct cvmx_pci_cfg07_s cn58xx; + struct cvmx_pci_cfg07_s cn58xxp1; +}; + +union cvmx_pci_cfg08 { + uint32_t u32; + struct cvmx_pci_cfg08_s { + uint32_t lbasez:28; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pci_cfg08_s cn30xx; + struct cvmx_pci_cfg08_s cn31xx; + struct cvmx_pci_cfg08_s cn38xx; + struct cvmx_pci_cfg08_s cn38xxp2; + struct cvmx_pci_cfg08_s cn50xx; + struct cvmx_pci_cfg08_s cn58xx; + struct cvmx_pci_cfg08_s cn58xxp1; +}; + +union cvmx_pci_cfg09 { + uint32_t u32; + struct cvmx_pci_cfg09_s { + uint32_t hbase:25; + uint32_t hbasez:7; + } s; + struct cvmx_pci_cfg09_s cn30xx; + struct cvmx_pci_cfg09_s cn31xx; + struct cvmx_pci_cfg09_s cn38xx; + struct cvmx_pci_cfg09_s cn38xxp2; + struct cvmx_pci_cfg09_s cn50xx; + struct cvmx_pci_cfg09_s cn58xx; + struct cvmx_pci_cfg09_s cn58xxp1; +}; + +union cvmx_pci_cfg10 { + uint32_t u32; + struct cvmx_pci_cfg10_s { + uint32_t cisp:32; + } s; + struct cvmx_pci_cfg10_s cn30xx; + struct cvmx_pci_cfg10_s cn31xx; + struct cvmx_pci_cfg10_s cn38xx; + struct cvmx_pci_cfg10_s cn38xxp2; + struct cvmx_pci_cfg10_s cn50xx; + struct cvmx_pci_cfg10_s cn58xx; + struct cvmx_pci_cfg10_s cn58xxp1; +}; + +union cvmx_pci_cfg11 { + uint32_t u32; + struct cvmx_pci_cfg11_s { + uint32_t ssid:16; + uint32_t ssvid:16; + } s; + struct cvmx_pci_cfg11_s cn30xx; + struct cvmx_pci_cfg11_s cn31xx; + struct cvmx_pci_cfg11_s cn38xx; + struct cvmx_pci_cfg11_s cn38xxp2; + struct cvmx_pci_cfg11_s cn50xx; + struct cvmx_pci_cfg11_s cn58xx; + struct cvmx_pci_cfg11_s cn58xxp1; +}; + +union cvmx_pci_cfg12 { + uint32_t u32; + struct cvmx_pci_cfg12_s { + uint32_t erbar:16; + uint32_t erbarz:5; + uint32_t reserved_1_10:10; + uint32_t erbar_en:1; + } s; + struct cvmx_pci_cfg12_s cn30xx; + struct cvmx_pci_cfg12_s cn31xx; + struct cvmx_pci_cfg12_s cn38xx; + struct cvmx_pci_cfg12_s cn38xxp2; + struct cvmx_pci_cfg12_s cn50xx; + struct cvmx_pci_cfg12_s cn58xx; + struct cvmx_pci_cfg12_s cn58xxp1; +}; + +union cvmx_pci_cfg13 { + uint32_t u32; + struct cvmx_pci_cfg13_s { + uint32_t reserved_8_31:24; + uint32_t cp:8; + } s; + struct cvmx_pci_cfg13_s cn30xx; + struct cvmx_pci_cfg13_s cn31xx; + struct cvmx_pci_cfg13_s cn38xx; + struct cvmx_pci_cfg13_s cn38xxp2; + struct cvmx_pci_cfg13_s cn50xx; + struct cvmx_pci_cfg13_s cn58xx; + struct cvmx_pci_cfg13_s cn58xxp1; +}; + +union cvmx_pci_cfg15 { + uint32_t u32; + struct cvmx_pci_cfg15_s { + uint32_t ml:8; + uint32_t mg:8; + uint32_t inta:8; + uint32_t il:8; + } s; + struct cvmx_pci_cfg15_s cn30xx; + struct cvmx_pci_cfg15_s cn31xx; + struct cvmx_pci_cfg15_s cn38xx; + struct cvmx_pci_cfg15_s cn38xxp2; + struct cvmx_pci_cfg15_s cn50xx; + struct cvmx_pci_cfg15_s cn58xx; + struct cvmx_pci_cfg15_s cn58xxp1; +}; + +union cvmx_pci_cfg16 { + uint32_t u32; + struct cvmx_pci_cfg16_s { + uint32_t trdnpr:1; + uint32_t trdard:1; + uint32_t rdsati:1; + uint32_t trdrs:1; + uint32_t trtae:1; + uint32_t twsei:1; + uint32_t twsen:1; + uint32_t twtae:1; + uint32_t tmae:1; + uint32_t tslte:3; + uint32_t tilt:4; + uint32_t pbe:12; + uint32_t dppmr:1; + uint32_t reserved_2_2:1; + uint32_t tswc:1; + uint32_t mltd:1; + } s; + struct cvmx_pci_cfg16_s cn30xx; + struct cvmx_pci_cfg16_s cn31xx; + struct cvmx_pci_cfg16_s cn38xx; + struct cvmx_pci_cfg16_s cn38xxp2; + struct cvmx_pci_cfg16_s 
cn50xx; + struct cvmx_pci_cfg16_s cn58xx; + struct cvmx_pci_cfg16_s cn58xxp1; +}; + +union cvmx_pci_cfg17 { + uint32_t u32; + struct cvmx_pci_cfg17_s { + uint32_t tscme:32; + } s; + struct cvmx_pci_cfg17_s cn30xx; + struct cvmx_pci_cfg17_s cn31xx; + struct cvmx_pci_cfg17_s cn38xx; + struct cvmx_pci_cfg17_s cn38xxp2; + struct cvmx_pci_cfg17_s cn50xx; + struct cvmx_pci_cfg17_s cn58xx; + struct cvmx_pci_cfg17_s cn58xxp1; +}; + +union cvmx_pci_cfg18 { + uint32_t u32; + struct cvmx_pci_cfg18_s { + uint32_t tdsrps:32; + } s; + struct cvmx_pci_cfg18_s cn30xx; + struct cvmx_pci_cfg18_s cn31xx; + struct cvmx_pci_cfg18_s cn38xx; + struct cvmx_pci_cfg18_s cn38xxp2; + struct cvmx_pci_cfg18_s cn50xx; + struct cvmx_pci_cfg18_s cn58xx; + struct cvmx_pci_cfg18_s cn58xxp1; +}; + +union cvmx_pci_cfg19 { + uint32_t u32; + struct cvmx_pci_cfg19_s { + uint32_t mrbcm:1; + uint32_t mrbci:1; + uint32_t mdwe:1; + uint32_t mdre:1; + uint32_t mdrimc:1; + uint32_t mdrrmc:3; + uint32_t tmes:8; + uint32_t teci:1; + uint32_t tmei:1; + uint32_t tmse:1; + uint32_t tmdpes:1; + uint32_t tmapes:1; + uint32_t reserved_9_10:2; + uint32_t tibcd:1; + uint32_t tibde:1; + uint32_t reserved_6_6:1; + uint32_t tidomc:1; + uint32_t tdomc:5; + } s; + struct cvmx_pci_cfg19_s cn30xx; + struct cvmx_pci_cfg19_s cn31xx; + struct cvmx_pci_cfg19_s cn38xx; + struct cvmx_pci_cfg19_s cn38xxp2; + struct cvmx_pci_cfg19_s cn50xx; + struct cvmx_pci_cfg19_s cn58xx; + struct cvmx_pci_cfg19_s cn58xxp1; +}; + +union cvmx_pci_cfg20 { + uint32_t u32; + struct cvmx_pci_cfg20_s { + uint32_t mdsp:32; + } s; + struct cvmx_pci_cfg20_s cn30xx; + struct cvmx_pci_cfg20_s cn31xx; + struct cvmx_pci_cfg20_s cn38xx; + struct cvmx_pci_cfg20_s cn38xxp2; + struct cvmx_pci_cfg20_s cn50xx; + struct cvmx_pci_cfg20_s cn58xx; + struct cvmx_pci_cfg20_s cn58xxp1; +}; + +union cvmx_pci_cfg21 { + uint32_t u32; + struct cvmx_pci_cfg21_s { + uint32_t scmre:32; + } s; + struct cvmx_pci_cfg21_s cn30xx; + struct cvmx_pci_cfg21_s cn31xx; + struct cvmx_pci_cfg21_s cn38xx; + struct cvmx_pci_cfg21_s cn38xxp2; + struct cvmx_pci_cfg21_s cn50xx; + struct cvmx_pci_cfg21_s cn58xx; + struct cvmx_pci_cfg21_s cn58xxp1; +}; + +union cvmx_pci_cfg22 { + uint32_t u32; + struct cvmx_pci_cfg22_s { + uint32_t mac:7; + uint32_t reserved_19_24:6; + uint32_t flush:1; + uint32_t mra:1; + uint32_t mtta:1; + uint32_t mrv:8; + uint32_t mttv:8; + } s; + struct cvmx_pci_cfg22_s cn30xx; + struct cvmx_pci_cfg22_s cn31xx; + struct cvmx_pci_cfg22_s cn38xx; + struct cvmx_pci_cfg22_s cn38xxp2; + struct cvmx_pci_cfg22_s cn50xx; + struct cvmx_pci_cfg22_s cn58xx; + struct cvmx_pci_cfg22_s cn58xxp1; +}; + +union cvmx_pci_cfg56 { + uint32_t u32; + struct cvmx_pci_cfg56_s { + uint32_t reserved_23_31:9; + uint32_t most:3; + uint32_t mmbc:2; + uint32_t roe:1; + uint32_t dpere:1; + uint32_t ncp:8; + uint32_t pxcid:8; + } s; + struct cvmx_pci_cfg56_s cn30xx; + struct cvmx_pci_cfg56_s cn31xx; + struct cvmx_pci_cfg56_s cn38xx; + struct cvmx_pci_cfg56_s cn38xxp2; + struct cvmx_pci_cfg56_s cn50xx; + struct cvmx_pci_cfg56_s cn58xx; + struct cvmx_pci_cfg56_s cn58xxp1; +}; + +union cvmx_pci_cfg57 { + uint32_t u32; + struct cvmx_pci_cfg57_s { + uint32_t reserved_30_31:2; + uint32_t scemr:1; + uint32_t mcrsd:3; + uint32_t mostd:3; + uint32_t mmrbcd:2; + uint32_t dc:1; + uint32_t usc:1; + uint32_t scd:1; + uint32_t m133:1; + uint32_t w64:1; + uint32_t bn:8; + uint32_t dn:5; + uint32_t fn:3; + } s; + struct cvmx_pci_cfg57_s cn30xx; + struct cvmx_pci_cfg57_s cn31xx; + struct cvmx_pci_cfg57_s cn38xx; + struct cvmx_pci_cfg57_s cn38xxp2; + 
struct cvmx_pci_cfg57_s cn50xx; + struct cvmx_pci_cfg57_s cn58xx; + struct cvmx_pci_cfg57_s cn58xxp1; +}; + +union cvmx_pci_cfg58 { + uint32_t u32; + struct cvmx_pci_cfg58_s { + uint32_t pmes:5; + uint32_t d2s:1; + uint32_t d1s:1; + uint32_t auxc:3; + uint32_t dsi:1; + uint32_t reserved_20_20:1; + uint32_t pmec:1; + uint32_t pcimiv:3; + uint32_t ncp:8; + uint32_t pmcid:8; + } s; + struct cvmx_pci_cfg58_s cn30xx; + struct cvmx_pci_cfg58_s cn31xx; + struct cvmx_pci_cfg58_s cn38xx; + struct cvmx_pci_cfg58_s cn38xxp2; + struct cvmx_pci_cfg58_s cn50xx; + struct cvmx_pci_cfg58_s cn58xx; + struct cvmx_pci_cfg58_s cn58xxp1; +}; + +union cvmx_pci_cfg59 { + uint32_t u32; + struct cvmx_pci_cfg59_s { + uint32_t pmdia:8; + uint32_t bpccen:1; + uint32_t bd3h:1; + uint32_t reserved_16_21:6; + uint32_t pmess:1; + uint32_t pmedsia:2; + uint32_t pmds:4; + uint32_t pmeens:1; + uint32_t reserved_2_7:6; + uint32_t ps:2; + } s; + struct cvmx_pci_cfg59_s cn30xx; + struct cvmx_pci_cfg59_s cn31xx; + struct cvmx_pci_cfg59_s cn38xx; + struct cvmx_pci_cfg59_s cn38xxp2; + struct cvmx_pci_cfg59_s cn50xx; + struct cvmx_pci_cfg59_s cn58xx; + struct cvmx_pci_cfg59_s cn58xxp1; +}; + +union cvmx_pci_cfg60 { + uint32_t u32; + struct cvmx_pci_cfg60_s { + uint32_t reserved_24_31:8; + uint32_t m64:1; + uint32_t mme:3; + uint32_t mmc:3; + uint32_t msien:1; + uint32_t ncp:8; + uint32_t msicid:8; + } s; + struct cvmx_pci_cfg60_s cn30xx; + struct cvmx_pci_cfg60_s cn31xx; + struct cvmx_pci_cfg60_s cn38xx; + struct cvmx_pci_cfg60_s cn38xxp2; + struct cvmx_pci_cfg60_s cn50xx; + struct cvmx_pci_cfg60_s cn58xx; + struct cvmx_pci_cfg60_s cn58xxp1; +}; + +union cvmx_pci_cfg61 { + uint32_t u32; + struct cvmx_pci_cfg61_s { + uint32_t msi31t2:30; + uint32_t reserved_0_1:2; + } s; + struct cvmx_pci_cfg61_s cn30xx; + struct cvmx_pci_cfg61_s cn31xx; + struct cvmx_pci_cfg61_s cn38xx; + struct cvmx_pci_cfg61_s cn38xxp2; + struct cvmx_pci_cfg61_s cn50xx; + struct cvmx_pci_cfg61_s cn58xx; + struct cvmx_pci_cfg61_s cn58xxp1; +}; + +union cvmx_pci_cfg62 { + uint32_t u32; + struct cvmx_pci_cfg62_s { + uint32_t msi:32; + } s; + struct cvmx_pci_cfg62_s cn30xx; + struct cvmx_pci_cfg62_s cn31xx; + struct cvmx_pci_cfg62_s cn38xx; + struct cvmx_pci_cfg62_s cn38xxp2; + struct cvmx_pci_cfg62_s cn50xx; + struct cvmx_pci_cfg62_s cn58xx; + struct cvmx_pci_cfg62_s cn58xxp1; +}; + +union cvmx_pci_cfg63 { + uint32_t u32; + struct cvmx_pci_cfg63_s { + uint32_t reserved_16_31:16; + uint32_t msimd:16; + } s; + struct cvmx_pci_cfg63_s cn30xx; + struct cvmx_pci_cfg63_s cn31xx; + struct cvmx_pci_cfg63_s cn38xx; + struct cvmx_pci_cfg63_s cn38xxp2; + struct cvmx_pci_cfg63_s cn50xx; + struct cvmx_pci_cfg63_s cn58xx; + struct cvmx_pci_cfg63_s cn58xxp1; +}; + +union cvmx_pci_cnt_reg { + uint64_t u64; + struct cvmx_pci_cnt_reg_s { + uint64_t reserved_38_63:26; + uint64_t hm_pcix:1; + uint64_t hm_speed:2; + uint64_t ap_pcix:1; + uint64_t ap_speed:2; + uint64_t pcicnt:32; + } s; + struct cvmx_pci_cnt_reg_s cn50xx; + struct cvmx_pci_cnt_reg_s cn58xx; + struct cvmx_pci_cnt_reg_s cn58xxp1; +}; + +union cvmx_pci_ctl_status_2 { + uint32_t u32; + struct cvmx_pci_ctl_status_2_s { + uint32_t reserved_29_31:3; + uint32_t bb1_hole:3; + uint32_t bb1_siz:1; + uint32_t bb_ca:1; + uint32_t bb_es:2; + uint32_t bb1:1; + uint32_t bb0:1; + uint32_t erst_n:1; + uint32_t bar2pres:1; + uint32_t scmtyp:1; + uint32_t scm:1; + uint32_t en_wfilt:1; + uint32_t reserved_14_14:1; + uint32_t ap_pcix:1; + uint32_t ap_64ad:1; + uint32_t b12_bist:1; + uint32_t pmo_amod:1; + uint32_t pmo_fpc:3; + uint32_t 
tsr_hwm:3; + uint32_t bar2_enb:1; + uint32_t bar2_esx:2; + uint32_t bar2_cax:1; + } s; + struct cvmx_pci_ctl_status_2_s cn30xx; + struct cvmx_pci_ctl_status_2_cn31xx { + uint32_t reserved_20_31:12; + uint32_t erst_n:1; + uint32_t bar2pres:1; + uint32_t scmtyp:1; + uint32_t scm:1; + uint32_t en_wfilt:1; + uint32_t reserved_14_14:1; + uint32_t ap_pcix:1; + uint32_t ap_64ad:1; + uint32_t b12_bist:1; + uint32_t pmo_amod:1; + uint32_t pmo_fpc:3; + uint32_t tsr_hwm:3; + uint32_t bar2_enb:1; + uint32_t bar2_esx:2; + uint32_t bar2_cax:1; + } cn31xx; + struct cvmx_pci_ctl_status_2_s cn38xx; + struct cvmx_pci_ctl_status_2_cn31xx cn38xxp2; + struct cvmx_pci_ctl_status_2_s cn50xx; + struct cvmx_pci_ctl_status_2_s cn58xx; + struct cvmx_pci_ctl_status_2_s cn58xxp1; +}; + +union cvmx_pci_dbellx { + uint32_t u32; + struct cvmx_pci_dbellx_s { + uint32_t reserved_16_31:16; + uint32_t inc_val:16; + } s; + struct cvmx_pci_dbellx_s cn30xx; + struct cvmx_pci_dbellx_s cn31xx; + struct cvmx_pci_dbellx_s cn38xx; + struct cvmx_pci_dbellx_s cn38xxp2; + struct cvmx_pci_dbellx_s cn50xx; + struct cvmx_pci_dbellx_s cn58xx; + struct cvmx_pci_dbellx_s cn58xxp1; +}; + +union cvmx_pci_dma_cntx { + uint32_t u32; + struct cvmx_pci_dma_cntx_s { + uint32_t dma_cnt:32; + } s; + struct cvmx_pci_dma_cntx_s cn30xx; + struct cvmx_pci_dma_cntx_s cn31xx; + struct cvmx_pci_dma_cntx_s cn38xx; + struct cvmx_pci_dma_cntx_s cn38xxp2; + struct cvmx_pci_dma_cntx_s cn50xx; + struct cvmx_pci_dma_cntx_s cn58xx; + struct cvmx_pci_dma_cntx_s cn58xxp1; +}; + +union cvmx_pci_dma_int_levx { + uint32_t u32; + struct cvmx_pci_dma_int_levx_s { + uint32_t pkt_cnt:32; + } s; + struct cvmx_pci_dma_int_levx_s cn30xx; + struct cvmx_pci_dma_int_levx_s cn31xx; + struct cvmx_pci_dma_int_levx_s cn38xx; + struct cvmx_pci_dma_int_levx_s cn38xxp2; + struct cvmx_pci_dma_int_levx_s cn50xx; + struct cvmx_pci_dma_int_levx_s cn58xx; + struct cvmx_pci_dma_int_levx_s cn58xxp1; +}; + +union cvmx_pci_dma_timex { + uint32_t u32; + struct cvmx_pci_dma_timex_s { + uint32_t dma_time:32; + } s; + struct cvmx_pci_dma_timex_s cn30xx; + struct cvmx_pci_dma_timex_s cn31xx; + struct cvmx_pci_dma_timex_s cn38xx; + struct cvmx_pci_dma_timex_s cn38xxp2; + struct cvmx_pci_dma_timex_s cn50xx; + struct cvmx_pci_dma_timex_s cn58xx; + struct cvmx_pci_dma_timex_s cn58xxp1; +}; + +union cvmx_pci_instr_countx { + uint32_t u32; + struct cvmx_pci_instr_countx_s { + uint32_t icnt:32; + } s; + struct cvmx_pci_instr_countx_s cn30xx; + struct cvmx_pci_instr_countx_s cn31xx; + struct cvmx_pci_instr_countx_s cn38xx; + struct cvmx_pci_instr_countx_s cn38xxp2; + struct cvmx_pci_instr_countx_s cn50xx; + struct cvmx_pci_instr_countx_s cn58xx; + struct cvmx_pci_instr_countx_s cn58xxp1; +}; + +union cvmx_pci_int_enb { + uint64_t u64; + struct cvmx_pci_int_enb_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t idtime1:1; + uint64_t idtime0:1; + uint64_t idcnt1:1; + uint64_t idcnt0:1; + uint64_t iptime3:1; + uint64_t iptime2:1; + uint64_t iptime1:1; + uint64_t iptime0:1; + uint64_t ipcnt3:1; + uint64_t ipcnt2:1; + uint64_t ipcnt1:1; + uint64_t ipcnt0:1; + uint64_t irsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t idperr:1; + uint64_t iaperr:1; + uint64_t iserr:1; + uint64_t itsr_abt:1; + uint64_t imsc_msg:1; + uint64_t imsi_mabt:1; + uint64_t imsi_tabt:1; + uint64_t imsi_per:1; + uint64_t imr_tto:1; + uint64_t imr_abt:1; + uint64_t itr_abt:1; + uint64_t imr_wtto:1; + uint64_t imr_wabt:1; + 
uint64_t itr_wabt:1; + } s; + struct cvmx_pci_int_enb_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t idtime1:1; + uint64_t idtime0:1; + uint64_t idcnt1:1; + uint64_t idcnt0:1; + uint64_t reserved_22_24:3; + uint64_t iptime0:1; + uint64_t reserved_18_20:3; + uint64_t ipcnt0:1; + uint64_t irsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t idperr:1; + uint64_t iaperr:1; + uint64_t iserr:1; + uint64_t itsr_abt:1; + uint64_t imsc_msg:1; + uint64_t imsi_mabt:1; + uint64_t imsi_tabt:1; + uint64_t imsi_per:1; + uint64_t imr_tto:1; + uint64_t imr_abt:1; + uint64_t itr_abt:1; + uint64_t imr_wtto:1; + uint64_t imr_wabt:1; + uint64_t itr_wabt:1; + } cn30xx; + struct cvmx_pci_int_enb_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t idtime1:1; + uint64_t idtime0:1; + uint64_t idcnt1:1; + uint64_t idcnt0:1; + uint64_t reserved_23_24:2; + uint64_t iptime1:1; + uint64_t iptime0:1; + uint64_t reserved_19_20:2; + uint64_t ipcnt1:1; + uint64_t ipcnt0:1; + uint64_t irsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t idperr:1; + uint64_t iaperr:1; + uint64_t iserr:1; + uint64_t itsr_abt:1; + uint64_t imsc_msg:1; + uint64_t imsi_mabt:1; + uint64_t imsi_tabt:1; + uint64_t imsi_per:1; + uint64_t imr_tto:1; + uint64_t imr_abt:1; + uint64_t itr_abt:1; + uint64_t imr_wtto:1; + uint64_t imr_wabt:1; + uint64_t itr_wabt:1; + } cn31xx; + struct cvmx_pci_int_enb_s cn38xx; + struct cvmx_pci_int_enb_s cn38xxp2; + struct cvmx_pci_int_enb_cn31xx cn50xx; + struct cvmx_pci_int_enb_s cn58xx; + struct cvmx_pci_int_enb_s cn58xxp1; +}; + +union cvmx_pci_int_enb2 { + uint64_t u64; + struct cvmx_pci_int_enb2_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t rdtime1:1; + uint64_t rdtime0:1; + uint64_t rdcnt1:1; + uint64_t rdcnt0:1; + uint64_t rptime3:1; + uint64_t rptime2:1; + uint64_t rptime1:1; + uint64_t rptime0:1; + uint64_t rpcnt3:1; + uint64_t rpcnt2:1; + uint64_t rpcnt1:1; + uint64_t rpcnt0:1; + uint64_t rrsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t rdperr:1; + uint64_t raperr:1; + uint64_t rserr:1; + uint64_t rtsr_abt:1; + uint64_t rmsc_msg:1; + uint64_t rmsi_mabt:1; + uint64_t rmsi_tabt:1; + uint64_t rmsi_per:1; + uint64_t rmr_tto:1; + uint64_t rmr_abt:1; + uint64_t rtr_abt:1; + uint64_t rmr_wtto:1; + uint64_t rmr_wabt:1; + uint64_t rtr_wabt:1; + } s; + struct cvmx_pci_int_enb2_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t rdtime1:1; + uint64_t rdtime0:1; + uint64_t rdcnt1:1; + uint64_t rdcnt0:1; + uint64_t reserved_22_24:3; + uint64_t rptime0:1; + uint64_t reserved_18_20:3; + uint64_t rpcnt0:1; + uint64_t rrsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t rdperr:1; + uint64_t raperr:1; + uint64_t rserr:1; + uint64_t rtsr_abt:1; + uint64_t rmsc_msg:1; + uint64_t rmsi_mabt:1; + uint64_t rmsi_tabt:1; + uint64_t rmsi_per:1; + uint64_t rmr_tto:1; + uint64_t rmr_abt:1; + uint64_t rtr_abt:1; + uint64_t rmr_wtto:1; + uint64_t rmr_wabt:1; + uint64_t rtr_wabt:1; + } cn30xx; + struct cvmx_pci_int_enb2_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + 
uint64_t rdtime1:1; + uint64_t rdtime0:1; + uint64_t rdcnt1:1; + uint64_t rdcnt0:1; + uint64_t reserved_23_24:2; + uint64_t rptime1:1; + uint64_t rptime0:1; + uint64_t reserved_19_20:2; + uint64_t rpcnt1:1; + uint64_t rpcnt0:1; + uint64_t rrsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t rdperr:1; + uint64_t raperr:1; + uint64_t rserr:1; + uint64_t rtsr_abt:1; + uint64_t rmsc_msg:1; + uint64_t rmsi_mabt:1; + uint64_t rmsi_tabt:1; + uint64_t rmsi_per:1; + uint64_t rmr_tto:1; + uint64_t rmr_abt:1; + uint64_t rtr_abt:1; + uint64_t rmr_wtto:1; + uint64_t rmr_wabt:1; + uint64_t rtr_wabt:1; + } cn31xx; + struct cvmx_pci_int_enb2_s cn38xx; + struct cvmx_pci_int_enb2_s cn38xxp2; + struct cvmx_pci_int_enb2_cn31xx cn50xx; + struct cvmx_pci_int_enb2_s cn58xx; + struct cvmx_pci_int_enb2_s cn58xxp1; +}; + +union cvmx_pci_int_sum { + uint64_t u64; + struct cvmx_pci_int_sum_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t ptime3:1; + uint64_t ptime2:1; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t pcnt3:1; + uint64_t pcnt2:1; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } s; + struct cvmx_pci_int_sum_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_22_24:3; + uint64_t ptime0:1; + uint64_t reserved_18_20:3; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn30xx; + struct cvmx_pci_int_sum_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_23_24:2; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t reserved_19_20:2; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn31xx; + struct cvmx_pci_int_sum_s cn38xx; + struct cvmx_pci_int_sum_s cn38xxp2; + struct cvmx_pci_int_sum_cn31xx cn50xx; + struct cvmx_pci_int_sum_s cn58xx; + struct cvmx_pci_int_sum_s cn58xxp1; +}; + +union cvmx_pci_int_sum2 { + uint64_t u64; + struct cvmx_pci_int_sum2_s { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t 
dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t ptime3:1; + uint64_t ptime2:1; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t pcnt3:1; + uint64_t pcnt2:1; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } s; + struct cvmx_pci_int_sum2_cn30xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_22_24:3; + uint64_t ptime0:1; + uint64_t reserved_18_20:3; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn30xx; + struct cvmx_pci_int_sum2_cn31xx { + uint64_t reserved_34_63:30; + uint64_t ill_rd:1; + uint64_t ill_wr:1; + uint64_t win_wr:1; + uint64_t dma1_fi:1; + uint64_t dma0_fi:1; + uint64_t dtime1:1; + uint64_t dtime0:1; + uint64_t dcnt1:1; + uint64_t dcnt0:1; + uint64_t reserved_23_24:2; + uint64_t ptime1:1; + uint64_t ptime0:1; + uint64_t reserved_19_20:2; + uint64_t pcnt1:1; + uint64_t pcnt0:1; + uint64_t rsl_int:1; + uint64_t ill_rrd:1; + uint64_t ill_rwr:1; + uint64_t dperr:1; + uint64_t aperr:1; + uint64_t serr:1; + uint64_t tsr_abt:1; + uint64_t msc_msg:1; + uint64_t msi_mabt:1; + uint64_t msi_tabt:1; + uint64_t msi_per:1; + uint64_t mr_tto:1; + uint64_t mr_abt:1; + uint64_t tr_abt:1; + uint64_t mr_wtto:1; + uint64_t mr_wabt:1; + uint64_t tr_wabt:1; + } cn31xx; + struct cvmx_pci_int_sum2_s cn38xx; + struct cvmx_pci_int_sum2_s cn38xxp2; + struct cvmx_pci_int_sum2_cn31xx cn50xx; + struct cvmx_pci_int_sum2_s cn58xx; + struct cvmx_pci_int_sum2_s cn58xxp1; +}; + +union cvmx_pci_msi_rcv { + uint32_t u32; + struct cvmx_pci_msi_rcv_s { + uint32_t reserved_6_31:26; + uint32_t intr:6; + } s; + struct cvmx_pci_msi_rcv_s cn30xx; + struct cvmx_pci_msi_rcv_s cn31xx; + struct cvmx_pci_msi_rcv_s cn38xx; + struct cvmx_pci_msi_rcv_s cn38xxp2; + struct cvmx_pci_msi_rcv_s cn50xx; + struct cvmx_pci_msi_rcv_s cn58xx; + struct cvmx_pci_msi_rcv_s cn58xxp1; +}; + +union cvmx_pci_pkt_creditsx { + uint32_t u32; + struct cvmx_pci_pkt_creditsx_s { + uint32_t pkt_cnt:16; + uint32_t ptr_cnt:16; + } s; + struct cvmx_pci_pkt_creditsx_s cn30xx; + struct cvmx_pci_pkt_creditsx_s cn31xx; + struct cvmx_pci_pkt_creditsx_s cn38xx; + struct cvmx_pci_pkt_creditsx_s cn38xxp2; + struct cvmx_pci_pkt_creditsx_s cn50xx; + struct cvmx_pci_pkt_creditsx_s cn58xx; + struct cvmx_pci_pkt_creditsx_s cn58xxp1; +}; + +union cvmx_pci_pkts_sentx { + uint32_t u32; + struct cvmx_pci_pkts_sentx_s { + uint32_t pkt_cnt:32; + } s; + struct cvmx_pci_pkts_sentx_s cn30xx; + struct cvmx_pci_pkts_sentx_s cn31xx; + struct cvmx_pci_pkts_sentx_s cn38xx; + struct cvmx_pci_pkts_sentx_s cn38xxp2; + struct cvmx_pci_pkts_sentx_s cn50xx; + struct cvmx_pci_pkts_sentx_s cn58xx; + struct cvmx_pci_pkts_sentx_s cn58xxp1; +}; + +union 
cvmx_pci_pkts_sent_int_levx { + uint32_t u32; + struct cvmx_pci_pkts_sent_int_levx_s { + uint32_t pkt_cnt:32; + } s; + struct cvmx_pci_pkts_sent_int_levx_s cn30xx; + struct cvmx_pci_pkts_sent_int_levx_s cn31xx; + struct cvmx_pci_pkts_sent_int_levx_s cn38xx; + struct cvmx_pci_pkts_sent_int_levx_s cn38xxp2; + struct cvmx_pci_pkts_sent_int_levx_s cn50xx; + struct cvmx_pci_pkts_sent_int_levx_s cn58xx; + struct cvmx_pci_pkts_sent_int_levx_s cn58xxp1; +}; + +union cvmx_pci_pkts_sent_timex { + uint32_t u32; + struct cvmx_pci_pkts_sent_timex_s { + uint32_t pkt_time:32; + } s; + struct cvmx_pci_pkts_sent_timex_s cn30xx; + struct cvmx_pci_pkts_sent_timex_s cn31xx; + struct cvmx_pci_pkts_sent_timex_s cn38xx; + struct cvmx_pci_pkts_sent_timex_s cn38xxp2; + struct cvmx_pci_pkts_sent_timex_s cn50xx; + struct cvmx_pci_pkts_sent_timex_s cn58xx; + struct cvmx_pci_pkts_sent_timex_s cn58xxp1; +}; + +union cvmx_pci_read_cmd_6 { + uint32_t u32; + struct cvmx_pci_read_cmd_6_s { + uint32_t reserved_9_31:23; + uint32_t min_data:6; + uint32_t prefetch:3; + } s; + struct cvmx_pci_read_cmd_6_s cn30xx; + struct cvmx_pci_read_cmd_6_s cn31xx; + struct cvmx_pci_read_cmd_6_s cn38xx; + struct cvmx_pci_read_cmd_6_s cn38xxp2; + struct cvmx_pci_read_cmd_6_s cn50xx; + struct cvmx_pci_read_cmd_6_s cn58xx; + struct cvmx_pci_read_cmd_6_s cn58xxp1; +}; + +union cvmx_pci_read_cmd_c { + uint32_t u32; + struct cvmx_pci_read_cmd_c_s { + uint32_t reserved_9_31:23; + uint32_t min_data:6; + uint32_t prefetch:3; + } s; + struct cvmx_pci_read_cmd_c_s cn30xx; + struct cvmx_pci_read_cmd_c_s cn31xx; + struct cvmx_pci_read_cmd_c_s cn38xx; + struct cvmx_pci_read_cmd_c_s cn38xxp2; + struct cvmx_pci_read_cmd_c_s cn50xx; + struct cvmx_pci_read_cmd_c_s cn58xx; + struct cvmx_pci_read_cmd_c_s cn58xxp1; +}; + +union cvmx_pci_read_cmd_e { + uint32_t u32; + struct cvmx_pci_read_cmd_e_s { + uint32_t reserved_9_31:23; + uint32_t min_data:6; + uint32_t prefetch:3; + } s; + struct cvmx_pci_read_cmd_e_s cn30xx; + struct cvmx_pci_read_cmd_e_s cn31xx; + struct cvmx_pci_read_cmd_e_s cn38xx; + struct cvmx_pci_read_cmd_e_s cn38xxp2; + struct cvmx_pci_read_cmd_e_s cn50xx; + struct cvmx_pci_read_cmd_e_s cn58xx; + struct cvmx_pci_read_cmd_e_s cn58xxp1; +}; + +union cvmx_pci_read_timeout { + uint64_t u64; + struct cvmx_pci_read_timeout_s { + uint64_t reserved_32_63:32; + uint64_t enb:1; + uint64_t cnt:31; + } s; + struct cvmx_pci_read_timeout_s cn30xx; + struct cvmx_pci_read_timeout_s cn31xx; + struct cvmx_pci_read_timeout_s cn38xx; + struct cvmx_pci_read_timeout_s cn38xxp2; + struct cvmx_pci_read_timeout_s cn50xx; + struct cvmx_pci_read_timeout_s cn58xx; + struct cvmx_pci_read_timeout_s cn58xxp1; +}; + +union cvmx_pci_scm_reg { + uint64_t u64; + struct cvmx_pci_scm_reg_s { + uint64_t reserved_32_63:32; + uint64_t scm:32; + } s; + struct cvmx_pci_scm_reg_s cn30xx; + struct cvmx_pci_scm_reg_s cn31xx; + struct cvmx_pci_scm_reg_s cn38xx; + struct cvmx_pci_scm_reg_s cn38xxp2; + struct cvmx_pci_scm_reg_s cn50xx; + struct cvmx_pci_scm_reg_s cn58xx; + struct cvmx_pci_scm_reg_s cn58xxp1; +}; + +union cvmx_pci_tsr_reg { + uint64_t u64; + struct cvmx_pci_tsr_reg_s { + uint64_t reserved_36_63:28; + uint64_t tsr:36; + } s; + struct cvmx_pci_tsr_reg_s cn30xx; + struct cvmx_pci_tsr_reg_s cn31xx; + struct cvmx_pci_tsr_reg_s cn38xx; + struct cvmx_pci_tsr_reg_s cn38xxp2; + struct cvmx_pci_tsr_reg_s cn50xx; + struct cvmx_pci_tsr_reg_s cn58xx; + struct cvmx_pci_tsr_reg_s cn58xxp1; +}; + +union cvmx_pci_win_rd_addr { + uint64_t u64; + struct cvmx_pci_win_rd_addr_s { + uint64_t 
reserved_49_63:15; + uint64_t iobit:1; + uint64_t reserved_0_47:48; + } s; + struct cvmx_pci_win_rd_addr_cn30xx { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t rd_addr:46; + uint64_t reserved_0_1:2; + } cn30xx; + struct cvmx_pci_win_rd_addr_cn30xx cn31xx; + struct cvmx_pci_win_rd_addr_cn38xx { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t rd_addr:45; + uint64_t reserved_0_2:3; + } cn38xx; + struct cvmx_pci_win_rd_addr_cn38xx cn38xxp2; + struct cvmx_pci_win_rd_addr_cn30xx cn50xx; + struct cvmx_pci_win_rd_addr_cn38xx cn58xx; + struct cvmx_pci_win_rd_addr_cn38xx cn58xxp1; +}; + +union cvmx_pci_win_rd_data { + uint64_t u64; + struct cvmx_pci_win_rd_data_s { + uint64_t rd_data:64; + } s; + struct cvmx_pci_win_rd_data_s cn30xx; + struct cvmx_pci_win_rd_data_s cn31xx; + struct cvmx_pci_win_rd_data_s cn38xx; + struct cvmx_pci_win_rd_data_s cn38xxp2; + struct cvmx_pci_win_rd_data_s cn50xx; + struct cvmx_pci_win_rd_data_s cn58xx; + struct cvmx_pci_win_rd_data_s cn58xxp1; +}; + +union cvmx_pci_win_wr_addr { + uint64_t u64; + struct cvmx_pci_win_wr_addr_s { + uint64_t reserved_49_63:15; + uint64_t iobit:1; + uint64_t wr_addr:45; + uint64_t reserved_0_2:3; + } s; + struct cvmx_pci_win_wr_addr_s cn30xx; + struct cvmx_pci_win_wr_addr_s cn31xx; + struct cvmx_pci_win_wr_addr_s cn38xx; + struct cvmx_pci_win_wr_addr_s cn38xxp2; + struct cvmx_pci_win_wr_addr_s cn50xx; + struct cvmx_pci_win_wr_addr_s cn58xx; + struct cvmx_pci_win_wr_addr_s cn58xxp1; +}; + +union cvmx_pci_win_wr_data { + uint64_t u64; + struct cvmx_pci_win_wr_data_s { + uint64_t wr_data:64; + } s; + struct cvmx_pci_win_wr_data_s cn30xx; + struct cvmx_pci_win_wr_data_s cn31xx; + struct cvmx_pci_win_wr_data_s cn38xx; + struct cvmx_pci_win_wr_data_s cn38xxp2; + struct cvmx_pci_win_wr_data_s cn50xx; + struct cvmx_pci_win_wr_data_s cn58xx; + struct cvmx_pci_win_wr_data_s cn58xxp1; +}; + +union cvmx_pci_win_wr_mask { + uint64_t u64; + struct cvmx_pci_win_wr_mask_s { + uint64_t reserved_8_63:56; + uint64_t wr_mask:8; + } s; + struct cvmx_pci_win_wr_mask_s cn30xx; + struct cvmx_pci_win_wr_mask_s cn31xx; + struct cvmx_pci_win_wr_mask_s cn38xx; + struct cvmx_pci_win_wr_mask_s cn38xxp2; + struct cvmx_pci_win_wr_mask_s cn50xx; + struct cvmx_pci_win_wr_mask_s cn58xx; + struct cvmx_pci_win_wr_mask_s cn58xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h new file mode 100644 index 00000000000..d553f8e88df --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h @@ -0,0 +1,1365 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. 
+ * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCIEEP_DEFS_H__ +#define __CVMX_PCIEEP_DEFS_H__ + +#define CVMX_PCIEEP_CFG000 \ + (0x0000000000000000ull) +#define CVMX_PCIEEP_CFG001 \ + (0x0000000000000004ull) +#define CVMX_PCIEEP_CFG002 \ + (0x0000000000000008ull) +#define CVMX_PCIEEP_CFG003 \ + (0x000000000000000Cull) +#define CVMX_PCIEEP_CFG004 \ + (0x0000000000000010ull) +#define CVMX_PCIEEP_CFG004_MASK \ + (0x0000000080000010ull) +#define CVMX_PCIEEP_CFG005 \ + (0x0000000000000014ull) +#define CVMX_PCIEEP_CFG005_MASK \ + (0x0000000080000014ull) +#define CVMX_PCIEEP_CFG006 \ + (0x0000000000000018ull) +#define CVMX_PCIEEP_CFG006_MASK \ + (0x0000000080000018ull) +#define CVMX_PCIEEP_CFG007 \ + (0x000000000000001Cull) +#define CVMX_PCIEEP_CFG007_MASK \ + (0x000000008000001Cull) +#define CVMX_PCIEEP_CFG008 \ + (0x0000000000000020ull) +#define CVMX_PCIEEP_CFG008_MASK \ + (0x0000000080000020ull) +#define CVMX_PCIEEP_CFG009 \ + (0x0000000000000024ull) +#define CVMX_PCIEEP_CFG009_MASK \ + (0x0000000080000024ull) +#define CVMX_PCIEEP_CFG010 \ + (0x0000000000000028ull) +#define CVMX_PCIEEP_CFG011 \ + (0x000000000000002Cull) +#define CVMX_PCIEEP_CFG012 \ + (0x0000000000000030ull) +#define CVMX_PCIEEP_CFG012_MASK \ + (0x0000000080000030ull) +#define CVMX_PCIEEP_CFG013 \ + (0x0000000000000034ull) +#define CVMX_PCIEEP_CFG015 \ + (0x000000000000003Cull) +#define CVMX_PCIEEP_CFG016 \ + (0x0000000000000040ull) +#define CVMX_PCIEEP_CFG017 \ + (0x0000000000000044ull) +#define CVMX_PCIEEP_CFG020 \ + (0x0000000000000050ull) +#define CVMX_PCIEEP_CFG021 \ + (0x0000000000000054ull) +#define CVMX_PCIEEP_CFG022 \ + (0x0000000000000058ull) +#define CVMX_PCIEEP_CFG023 \ + (0x000000000000005Cull) +#define CVMX_PCIEEP_CFG028 \ + (0x0000000000000070ull) +#define CVMX_PCIEEP_CFG029 \ + (0x0000000000000074ull) +#define CVMX_PCIEEP_CFG030 \ + (0x0000000000000078ull) +#define CVMX_PCIEEP_CFG031 \ + (0x000000000000007Cull) +#define CVMX_PCIEEP_CFG032 \ + (0x0000000000000080ull) +#define CVMX_PCIEEP_CFG033 \ + (0x0000000000000084ull) +#define CVMX_PCIEEP_CFG034 \ + (0x0000000000000088ull) +#define CVMX_PCIEEP_CFG037 \ + (0x0000000000000094ull) +#define CVMX_PCIEEP_CFG038 \ + (0x0000000000000098ull) +#define CVMX_PCIEEP_CFG039 \ + (0x000000000000009Cull) +#define CVMX_PCIEEP_CFG040 \ + (0x00000000000000A0ull) +#define CVMX_PCIEEP_CFG041 \ + (0x00000000000000A4ull) +#define CVMX_PCIEEP_CFG042 \ + (0x00000000000000A8ull) +#define CVMX_PCIEEP_CFG064 \ + (0x0000000000000100ull) +#define CVMX_PCIEEP_CFG065 \ + (0x0000000000000104ull) +#define CVMX_PCIEEP_CFG066 \ + (0x0000000000000108ull) +#define CVMX_PCIEEP_CFG067 \ + (0x000000000000010Cull) +#define CVMX_PCIEEP_CFG068 \ + (0x0000000000000110ull) +#define CVMX_PCIEEP_CFG069 \ + (0x0000000000000114ull) +#define CVMX_PCIEEP_CFG070 \ + (0x0000000000000118ull) +#define CVMX_PCIEEP_CFG071 \ + (0x000000000000011Cull) +#define CVMX_PCIEEP_CFG072 \ + (0x0000000000000120ull) +#define CVMX_PCIEEP_CFG073 \ + (0x0000000000000124ull) +#define CVMX_PCIEEP_CFG074 \ + (0x0000000000000128ull) +#define CVMX_PCIEEP_CFG448 \ + (0x0000000000000700ull) +#define CVMX_PCIEEP_CFG449 \ + (0x0000000000000704ull) +#define CVMX_PCIEEP_CFG450 \ + (0x0000000000000708ull) +#define CVMX_PCIEEP_CFG451 \ + (0x000000000000070Cull) +#define CVMX_PCIEEP_CFG452 \ + (0x0000000000000710ull) +#define CVMX_PCIEEP_CFG453 \ + (0x0000000000000714ull) 
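Both headers added by this patch follow the same pattern: a per-CSR address macro, then a union that overlays named bitfields on the raw 32- or 64-bit register value, with one struct member per supported chip (cn30xx, cn31xx, cn38xx, ...) so it is visible which models share a layout and which, like the cn31xx variant of cvmx_pci_ctl_status_2, diverge. As a rough sketch of how such a union is consumed — this example is not part of the patch, the raw value is invented, and the SDK's CSR read helpers are deliberately omitted — decoding the device/vendor-ID register through the cvmx_pci_cfg00 union defined earlier might look like:

#include <stdint.h>
#include <stdio.h>

/* Copied verbatim from cvmx-pci-defs.h above, minus the per-chip aliases,
 * so that this sketch compiles standalone. */
union cvmx_pci_cfg00 {
	uint32_t u32;
	struct cvmx_pci_cfg00_s {
		uint32_t devid:16;	/* upper half on the big-endian MIPS target */
		uint32_t vendid:16;
	} s;
};

int main(void)
{
	union cvmx_pci_cfg00 cfg00;

	/* Made-up raw value standing in for a real config-space read. */
	cfg00.u32 = 0x0050177d;

	/*
	 * Fields are read by name instead of open-coded shift/mask.  C
	 * bitfield order is endianness-dependent, so on a little-endian
	 * host the two halves come out swapped relative to the Octeon
	 * target these headers are written for.
	 */
	printf("devid=0x%04x vendid=0x%04x\n",
	       (unsigned)cfg00.s.devid, (unsigned)cfg00.s.vendid);
	return 0;
}

The per-chip struct aliases (struct cvmx_pci_cfg00_s cn30xx; ... cn58xxp1;) carry no extra data; they appear to exist so that code written against one chip's name keeps compiling where the layout is shared, while genuinely different layouts (the _cn30xx/_cn31xx variants) stand out by type. The register-offset list for cvmx-pcieep-defs.h continues below.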
+#define CVMX_PCIEEP_CFG454 \ + (0x0000000000000718ull) +#define CVMX_PCIEEP_CFG455 \ + (0x000000000000071Cull) +#define CVMX_PCIEEP_CFG456 \ + (0x0000000000000720ull) +#define CVMX_PCIEEP_CFG458 \ + (0x0000000000000728ull) +#define CVMX_PCIEEP_CFG459 \ + (0x000000000000072Cull) +#define CVMX_PCIEEP_CFG460 \ + (0x0000000000000730ull) +#define CVMX_PCIEEP_CFG461 \ + (0x0000000000000734ull) +#define CVMX_PCIEEP_CFG462 \ + (0x0000000000000738ull) +#define CVMX_PCIEEP_CFG463 \ + (0x000000000000073Cull) +#define CVMX_PCIEEP_CFG464 \ + (0x0000000000000740ull) +#define CVMX_PCIEEP_CFG465 \ + (0x0000000000000744ull) +#define CVMX_PCIEEP_CFG466 \ + (0x0000000000000748ull) +#define CVMX_PCIEEP_CFG467 \ + (0x000000000000074Cull) +#define CVMX_PCIEEP_CFG468 \ + (0x0000000000000750ull) +#define CVMX_PCIEEP_CFG490 \ + (0x00000000000007A8ull) +#define CVMX_PCIEEP_CFG491 \ + (0x00000000000007ACull) +#define CVMX_PCIEEP_CFG492 \ + (0x00000000000007B0ull) +#define CVMX_PCIEEP_CFG516 \ + (0x0000000000000810ull) +#define CVMX_PCIEEP_CFG517 \ + (0x0000000000000814ull) + +union cvmx_pcieep_cfg000 { + uint32_t u32; + struct cvmx_pcieep_cfg000_s { + uint32_t devid:16; + uint32_t vendid:16; + } s; + struct cvmx_pcieep_cfg000_s cn52xx; + struct cvmx_pcieep_cfg000_s cn52xxp1; + struct cvmx_pcieep_cfg000_s cn56xx; + struct cvmx_pcieep_cfg000_s cn56xxp1; +}; + +union cvmx_pcieep_cfg001 { + uint32_t u32; + struct cvmx_pcieep_cfg001_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t cl:1; + uint32_t i_stat:1; + uint32_t reserved_11_18:8; + uint32_t i_dis:1; + uint32_t fbbe:1; + uint32_t see:1; + uint32_t ids_wcc:1; + uint32_t per:1; + uint32_t vps:1; + uint32_t mwice:1; + uint32_t scse:1; + uint32_t me:1; + uint32_t msae:1; + uint32_t isae:1; + } s; + struct cvmx_pcieep_cfg001_s cn52xx; + struct cvmx_pcieep_cfg001_s cn52xxp1; + struct cvmx_pcieep_cfg001_s cn56xx; + struct cvmx_pcieep_cfg001_s cn56xxp1; +}; + +union cvmx_pcieep_cfg002 { + uint32_t u32; + struct cvmx_pcieep_cfg002_s { + uint32_t bcc:8; + uint32_t sc:8; + uint32_t pi:8; + uint32_t rid:8; + } s; + struct cvmx_pcieep_cfg002_s cn52xx; + struct cvmx_pcieep_cfg002_s cn52xxp1; + struct cvmx_pcieep_cfg002_s cn56xx; + struct cvmx_pcieep_cfg002_s cn56xxp1; +}; + +union cvmx_pcieep_cfg003 { + uint32_t u32; + struct cvmx_pcieep_cfg003_s { + uint32_t bist:8; + uint32_t mfd:1; + uint32_t chf:7; + uint32_t lt:8; + uint32_t cls:8; + } s; + struct cvmx_pcieep_cfg003_s cn52xx; + struct cvmx_pcieep_cfg003_s cn52xxp1; + struct cvmx_pcieep_cfg003_s cn56xx; + struct cvmx_pcieep_cfg003_s cn56xxp1; +}; + +union cvmx_pcieep_cfg004 { + uint32_t u32; + struct cvmx_pcieep_cfg004_s { + uint32_t lbab:18; + uint32_t reserved_4_13:10; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pcieep_cfg004_s cn52xx; + struct cvmx_pcieep_cfg004_s cn52xxp1; + struct cvmx_pcieep_cfg004_s cn56xx; + struct cvmx_pcieep_cfg004_s cn56xxp1; +}; + +union cvmx_pcieep_cfg004_mask { + uint32_t u32; + struct cvmx_pcieep_cfg004_mask_s { + uint32_t lmask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg004_mask_s cn52xx; + struct cvmx_pcieep_cfg004_mask_s cn52xxp1; + struct cvmx_pcieep_cfg004_mask_s cn56xx; + struct cvmx_pcieep_cfg004_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg005 { + uint32_t u32; + struct cvmx_pcieep_cfg005_s { + uint32_t ubab:32; + } s; + struct cvmx_pcieep_cfg005_s cn52xx; + struct cvmx_pcieep_cfg005_s 
cn52xxp1; + struct cvmx_pcieep_cfg005_s cn56xx; + struct cvmx_pcieep_cfg005_s cn56xxp1; +}; + +union cvmx_pcieep_cfg005_mask { + uint32_t u32; + struct cvmx_pcieep_cfg005_mask_s { + uint32_t umask:32; + } s; + struct cvmx_pcieep_cfg005_mask_s cn52xx; + struct cvmx_pcieep_cfg005_mask_s cn52xxp1; + struct cvmx_pcieep_cfg005_mask_s cn56xx; + struct cvmx_pcieep_cfg005_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg006 { + uint32_t u32; + struct cvmx_pcieep_cfg006_s { + uint32_t lbab:6; + uint32_t reserved_4_25:22; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pcieep_cfg006_s cn52xx; + struct cvmx_pcieep_cfg006_s cn52xxp1; + struct cvmx_pcieep_cfg006_s cn56xx; + struct cvmx_pcieep_cfg006_s cn56xxp1; +}; + +union cvmx_pcieep_cfg006_mask { + uint32_t u32; + struct cvmx_pcieep_cfg006_mask_s { + uint32_t lmask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg006_mask_s cn52xx; + struct cvmx_pcieep_cfg006_mask_s cn52xxp1; + struct cvmx_pcieep_cfg006_mask_s cn56xx; + struct cvmx_pcieep_cfg006_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg007 { + uint32_t u32; + struct cvmx_pcieep_cfg007_s { + uint32_t ubab:32; + } s; + struct cvmx_pcieep_cfg007_s cn52xx; + struct cvmx_pcieep_cfg007_s cn52xxp1; + struct cvmx_pcieep_cfg007_s cn56xx; + struct cvmx_pcieep_cfg007_s cn56xxp1; +}; + +union cvmx_pcieep_cfg007_mask { + uint32_t u32; + struct cvmx_pcieep_cfg007_mask_s { + uint32_t umask:32; + } s; + struct cvmx_pcieep_cfg007_mask_s cn52xx; + struct cvmx_pcieep_cfg007_mask_s cn52xxp1; + struct cvmx_pcieep_cfg007_mask_s cn56xx; + struct cvmx_pcieep_cfg007_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg008 { + uint32_t u32; + struct cvmx_pcieep_cfg008_s { + uint32_t reserved_4_31:28; + uint32_t pf:1; + uint32_t typ:2; + uint32_t mspc:1; + } s; + struct cvmx_pcieep_cfg008_s cn52xx; + struct cvmx_pcieep_cfg008_s cn52xxp1; + struct cvmx_pcieep_cfg008_s cn56xx; + struct cvmx_pcieep_cfg008_s cn56xxp1; +}; + +union cvmx_pcieep_cfg008_mask { + uint32_t u32; + struct cvmx_pcieep_cfg008_mask_s { + uint32_t lmask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg008_mask_s cn52xx; + struct cvmx_pcieep_cfg008_mask_s cn52xxp1; + struct cvmx_pcieep_cfg008_mask_s cn56xx; + struct cvmx_pcieep_cfg008_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg009 { + uint32_t u32; + struct cvmx_pcieep_cfg009_s { + uint32_t ubab:25; + uint32_t reserved_0_6:7; + } s; + struct cvmx_pcieep_cfg009_s cn52xx; + struct cvmx_pcieep_cfg009_s cn52xxp1; + struct cvmx_pcieep_cfg009_s cn56xx; + struct cvmx_pcieep_cfg009_s cn56xxp1; +}; + +union cvmx_pcieep_cfg009_mask { + uint32_t u32; + struct cvmx_pcieep_cfg009_mask_s { + uint32_t umask:32; + } s; + struct cvmx_pcieep_cfg009_mask_s cn52xx; + struct cvmx_pcieep_cfg009_mask_s cn52xxp1; + struct cvmx_pcieep_cfg009_mask_s cn56xx; + struct cvmx_pcieep_cfg009_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg010 { + uint32_t u32; + struct cvmx_pcieep_cfg010_s { + uint32_t cisp:32; + } s; + struct cvmx_pcieep_cfg010_s cn52xx; + struct cvmx_pcieep_cfg010_s cn52xxp1; + struct cvmx_pcieep_cfg010_s cn56xx; + struct cvmx_pcieep_cfg010_s cn56xxp1; +}; + +union cvmx_pcieep_cfg011 { + uint32_t u32; + struct cvmx_pcieep_cfg011_s { + uint32_t ssid:16; + uint32_t ssvid:16; + } s; + struct cvmx_pcieep_cfg011_s cn52xx; + struct cvmx_pcieep_cfg011_s cn52xxp1; + struct cvmx_pcieep_cfg011_s cn56xx; + struct cvmx_pcieep_cfg011_s cn56xxp1; +}; + +union cvmx_pcieep_cfg012 { + uint32_t u32; + struct cvmx_pcieep_cfg012_s { + uint32_t eraddr:16; + uint32_t reserved_1_15:15; + uint32_t er_en:1; + } s; + 
struct cvmx_pcieep_cfg012_s cn52xx; + struct cvmx_pcieep_cfg012_s cn52xxp1; + struct cvmx_pcieep_cfg012_s cn56xx; + struct cvmx_pcieep_cfg012_s cn56xxp1; +}; + +union cvmx_pcieep_cfg012_mask { + uint32_t u32; + struct cvmx_pcieep_cfg012_mask_s { + uint32_t mask:31; + uint32_t enb:1; + } s; + struct cvmx_pcieep_cfg012_mask_s cn52xx; + struct cvmx_pcieep_cfg012_mask_s cn52xxp1; + struct cvmx_pcieep_cfg012_mask_s cn56xx; + struct cvmx_pcieep_cfg012_mask_s cn56xxp1; +}; + +union cvmx_pcieep_cfg013 { + uint32_t u32; + struct cvmx_pcieep_cfg013_s { + uint32_t reserved_8_31:24; + uint32_t cp:8; + } s; + struct cvmx_pcieep_cfg013_s cn52xx; + struct cvmx_pcieep_cfg013_s cn52xxp1; + struct cvmx_pcieep_cfg013_s cn56xx; + struct cvmx_pcieep_cfg013_s cn56xxp1; +}; + +union cvmx_pcieep_cfg015 { + uint32_t u32; + struct cvmx_pcieep_cfg015_s { + uint32_t ml:8; + uint32_t mg:8; + uint32_t inta:8; + uint32_t il:8; + } s; + struct cvmx_pcieep_cfg015_s cn52xx; + struct cvmx_pcieep_cfg015_s cn52xxp1; + struct cvmx_pcieep_cfg015_s cn56xx; + struct cvmx_pcieep_cfg015_s cn56xxp1; +}; + +union cvmx_pcieep_cfg016 { + uint32_t u32; + struct cvmx_pcieep_cfg016_s { + uint32_t pmes:5; + uint32_t d2s:1; + uint32_t d1s:1; + uint32_t auxc:3; + uint32_t dsi:1; + uint32_t reserved_20_20:1; + uint32_t pme_clock:1; + uint32_t pmsv:3; + uint32_t ncp:8; + uint32_t pmcid:8; + } s; + struct cvmx_pcieep_cfg016_s cn52xx; + struct cvmx_pcieep_cfg016_s cn52xxp1; + struct cvmx_pcieep_cfg016_s cn56xx; + struct cvmx_pcieep_cfg016_s cn56xxp1; +}; + +union cvmx_pcieep_cfg017 { + uint32_t u32; + struct cvmx_pcieep_cfg017_s { + uint32_t pmdia:8; + uint32_t bpccee:1; + uint32_t bd3h:1; + uint32_t reserved_16_21:6; + uint32_t pmess:1; + uint32_t pmedsia:2; + uint32_t pmds:4; + uint32_t pmeens:1; + uint32_t reserved_4_7:4; + uint32_t nsr:1; + uint32_t reserved_2_2:1; + uint32_t ps:2; + } s; + struct cvmx_pcieep_cfg017_s cn52xx; + struct cvmx_pcieep_cfg017_s cn52xxp1; + struct cvmx_pcieep_cfg017_s cn56xx; + struct cvmx_pcieep_cfg017_s cn56xxp1; +}; + +union cvmx_pcieep_cfg020 { + uint32_t u32; + struct cvmx_pcieep_cfg020_s { + uint32_t reserved_24_31:8; + uint32_t m64:1; + uint32_t mme:3; + uint32_t mmc:3; + uint32_t msien:1; + uint32_t ncp:8; + uint32_t msicid:8; + } s; + struct cvmx_pcieep_cfg020_s cn52xx; + struct cvmx_pcieep_cfg020_s cn52xxp1; + struct cvmx_pcieep_cfg020_s cn56xx; + struct cvmx_pcieep_cfg020_s cn56xxp1; +}; + +union cvmx_pcieep_cfg021 { + uint32_t u32; + struct cvmx_pcieep_cfg021_s { + uint32_t lmsi:30; + uint32_t reserved_0_1:2; + } s; + struct cvmx_pcieep_cfg021_s cn52xx; + struct cvmx_pcieep_cfg021_s cn52xxp1; + struct cvmx_pcieep_cfg021_s cn56xx; + struct cvmx_pcieep_cfg021_s cn56xxp1; +}; + +union cvmx_pcieep_cfg022 { + uint32_t u32; + struct cvmx_pcieep_cfg022_s { + uint32_t umsi:32; + } s; + struct cvmx_pcieep_cfg022_s cn52xx; + struct cvmx_pcieep_cfg022_s cn52xxp1; + struct cvmx_pcieep_cfg022_s cn56xx; + struct cvmx_pcieep_cfg022_s cn56xxp1; +}; + +union cvmx_pcieep_cfg023 { + uint32_t u32; + struct cvmx_pcieep_cfg023_s { + uint32_t reserved_16_31:16; + uint32_t msimd:16; + } s; + struct cvmx_pcieep_cfg023_s cn52xx; + struct cvmx_pcieep_cfg023_s cn52xxp1; + struct cvmx_pcieep_cfg023_s cn56xx; + struct cvmx_pcieep_cfg023_s cn56xxp1; +}; + +union cvmx_pcieep_cfg028 { + uint32_t u32; + struct cvmx_pcieep_cfg028_s { + uint32_t reserved_30_31:2; + uint32_t imn:5; + uint32_t si:1; + uint32_t dpt:4; + uint32_t pciecv:4; + uint32_t ncp:8; + uint32_t pcieid:8; + } s; + struct cvmx_pcieep_cfg028_s cn52xx; + struct 
cvmx_pcieep_cfg028_s cn52xxp1; + struct cvmx_pcieep_cfg028_s cn56xx; + struct cvmx_pcieep_cfg028_s cn56xxp1; +}; + +union cvmx_pcieep_cfg029 { + uint32_t u32; + struct cvmx_pcieep_cfg029_s { + uint32_t reserved_28_31:4; + uint32_t cspls:2; + uint32_t csplv:8; + uint32_t reserved_16_17:2; + uint32_t rber:1; + uint32_t reserved_12_14:3; + uint32_t el1al:3; + uint32_t el0al:3; + uint32_t etfs:1; + uint32_t pfs:2; + uint32_t mpss:3; + } s; + struct cvmx_pcieep_cfg029_s cn52xx; + struct cvmx_pcieep_cfg029_s cn52xxp1; + struct cvmx_pcieep_cfg029_s cn56xx; + struct cvmx_pcieep_cfg029_s cn56xxp1; +}; + +union cvmx_pcieep_cfg030 { + uint32_t u32; + struct cvmx_pcieep_cfg030_s { + uint32_t reserved_22_31:10; + uint32_t tp:1; + uint32_t ap_d:1; + uint32_t ur_d:1; + uint32_t fe_d:1; + uint32_t nfe_d:1; + uint32_t ce_d:1; + uint32_t reserved_15_15:1; + uint32_t mrrs:3; + uint32_t ns_en:1; + uint32_t ap_en:1; + uint32_t pf_en:1; + uint32_t etf_en:1; + uint32_t mps:3; + uint32_t ro_en:1; + uint32_t ur_en:1; + uint32_t fe_en:1; + uint32_t nfe_en:1; + uint32_t ce_en:1; + } s; + struct cvmx_pcieep_cfg030_s cn52xx; + struct cvmx_pcieep_cfg030_s cn52xxp1; + struct cvmx_pcieep_cfg030_s cn56xx; + struct cvmx_pcieep_cfg030_s cn56xxp1; +}; + +union cvmx_pcieep_cfg031 { + uint32_t u32; + struct cvmx_pcieep_cfg031_s { + uint32_t pnum:8; + uint32_t reserved_22_23:2; + uint32_t lbnc:1; + uint32_t dllarc:1; + uint32_t sderc:1; + uint32_t cpm:1; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t aslpms:2; + uint32_t mlw:6; + uint32_t mls:4; + } s; + struct cvmx_pcieep_cfg031_s cn52xx; + struct cvmx_pcieep_cfg031_s cn52xxp1; + struct cvmx_pcieep_cfg031_s cn56xx; + struct cvmx_pcieep_cfg031_s cn56xxp1; +}; + +union cvmx_pcieep_cfg032 { + uint32_t u32; + struct cvmx_pcieep_cfg032_s { + uint32_t reserved_30_31:2; + uint32_t dlla:1; + uint32_t scc:1; + uint32_t lt:1; + uint32_t reserved_26_26:1; + uint32_t nlw:6; + uint32_t ls:4; + uint32_t reserved_10_15:6; + uint32_t hawd:1; + uint32_t ecpm:1; + uint32_t es:1; + uint32_t ccc:1; + uint32_t rl:1; + uint32_t ld:1; + uint32_t rcb:1; + uint32_t reserved_2_2:1; + uint32_t aslpc:2; + } s; + struct cvmx_pcieep_cfg032_s cn52xx; + struct cvmx_pcieep_cfg032_s cn52xxp1; + struct cvmx_pcieep_cfg032_s cn56xx; + struct cvmx_pcieep_cfg032_s cn56xxp1; +}; + +union cvmx_pcieep_cfg033 { + uint32_t u32; + struct cvmx_pcieep_cfg033_s { + uint32_t ps_num:13; + uint32_t nccs:1; + uint32_t emip:1; + uint32_t sp_ls:2; + uint32_t sp_lv:8; + uint32_t hp_c:1; + uint32_t hp_s:1; + uint32_t pip:1; + uint32_t aip:1; + uint32_t mrlsp:1; + uint32_t pcp:1; + uint32_t abp:1; + } s; + struct cvmx_pcieep_cfg033_s cn52xx; + struct cvmx_pcieep_cfg033_s cn52xxp1; + struct cvmx_pcieep_cfg033_s cn56xx; + struct cvmx_pcieep_cfg033_s cn56xxp1; +}; + +union cvmx_pcieep_cfg034 { + uint32_t u32; + struct cvmx_pcieep_cfg034_s { + uint32_t reserved_25_31:7; + uint32_t dlls_c:1; + uint32_t emis:1; + uint32_t pds:1; + uint32_t mrlss:1; + uint32_t ccint_d:1; + uint32_t pd_c:1; + uint32_t mrls_c:1; + uint32_t pf_d:1; + uint32_t abp_d:1; + uint32_t reserved_13_15:3; + uint32_t dlls_en:1; + uint32_t emic:1; + uint32_t pcc:1; + uint32_t pic:2; + uint32_t aic:2; + uint32_t hpint_en:1; + uint32_t ccint_en:1; + uint32_t pd_en:1; + uint32_t mrls_en:1; + uint32_t pf_en:1; + uint32_t abp_en:1; + } s; + struct cvmx_pcieep_cfg034_s cn52xx; + struct cvmx_pcieep_cfg034_s cn52xxp1; + struct cvmx_pcieep_cfg034_s cn56xx; + struct cvmx_pcieep_cfg034_s cn56xxp1; +}; + +union cvmx_pcieep_cfg037 { + uint32_t u32; + struct cvmx_pcieep_cfg037_s { 
+ uint32_t reserved_5_31:27; + uint32_t ctds:1; + uint32_t ctrs:4; + } s; + struct cvmx_pcieep_cfg037_s cn52xx; + struct cvmx_pcieep_cfg037_s cn52xxp1; + struct cvmx_pcieep_cfg037_s cn56xx; + struct cvmx_pcieep_cfg037_s cn56xxp1; +}; + +union cvmx_pcieep_cfg038 { + uint32_t u32; + struct cvmx_pcieep_cfg038_s { + uint32_t reserved_5_31:27; + uint32_t ctd:1; + uint32_t ctv:4; + } s; + struct cvmx_pcieep_cfg038_s cn52xx; + struct cvmx_pcieep_cfg038_s cn52xxp1; + struct cvmx_pcieep_cfg038_s cn56xx; + struct cvmx_pcieep_cfg038_s cn56xxp1; +}; + +union cvmx_pcieep_cfg039 { + uint32_t u32; + struct cvmx_pcieep_cfg039_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg039_s cn52xx; + struct cvmx_pcieep_cfg039_s cn52xxp1; + struct cvmx_pcieep_cfg039_s cn56xx; + struct cvmx_pcieep_cfg039_s cn56xxp1; +}; + +union cvmx_pcieep_cfg040 { + uint32_t u32; + struct cvmx_pcieep_cfg040_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg040_s cn52xx; + struct cvmx_pcieep_cfg040_s cn52xxp1; + struct cvmx_pcieep_cfg040_s cn56xx; + struct cvmx_pcieep_cfg040_s cn56xxp1; +}; + +union cvmx_pcieep_cfg041 { + uint32_t u32; + struct cvmx_pcieep_cfg041_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg041_s cn52xx; + struct cvmx_pcieep_cfg041_s cn52xxp1; + struct cvmx_pcieep_cfg041_s cn56xx; + struct cvmx_pcieep_cfg041_s cn56xxp1; +}; + +union cvmx_pcieep_cfg042 { + uint32_t u32; + struct cvmx_pcieep_cfg042_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pcieep_cfg042_s cn52xx; + struct cvmx_pcieep_cfg042_s cn52xxp1; + struct cvmx_pcieep_cfg042_s cn56xx; + struct cvmx_pcieep_cfg042_s cn56xxp1; +}; + +union cvmx_pcieep_cfg064 { + uint32_t u32; + struct cvmx_pcieep_cfg064_s { + uint32_t nco:12; + uint32_t cv:4; + uint32_t pcieec:16; + } s; + struct cvmx_pcieep_cfg064_s cn52xx; + struct cvmx_pcieep_cfg064_s cn52xxp1; + struct cvmx_pcieep_cfg064_s cn56xx; + struct cvmx_pcieep_cfg064_s cn56xxp1; +}; + +union cvmx_pcieep_cfg065 { + uint32_t u32; + struct cvmx_pcieep_cfg065_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pcieep_cfg065_s cn52xx; + struct cvmx_pcieep_cfg065_s cn52xxp1; + struct cvmx_pcieep_cfg065_s cn56xx; + struct cvmx_pcieep_cfg065_s cn56xxp1; +}; + +union cvmx_pcieep_cfg066 { + uint32_t u32; + struct cvmx_pcieep_cfg066_s { + uint32_t reserved_21_31:11; + uint32_t urem:1; + uint32_t ecrcem:1; + uint32_t mtlpm:1; + uint32_t rom:1; + uint32_t ucm:1; + uint32_t cam:1; + uint32_t ctm:1; + uint32_t fcpem:1; + uint32_t ptlpm:1; + uint32_t reserved_6_11:6; + uint32_t sdem:1; + uint32_t dlpem:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pcieep_cfg066_s cn52xx; + struct cvmx_pcieep_cfg066_s cn52xxp1; + struct cvmx_pcieep_cfg066_s cn56xx; + struct cvmx_pcieep_cfg066_s cn56xxp1; +}; + +union cvmx_pcieep_cfg067 { + uint32_t u32; + struct cvmx_pcieep_cfg067_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pcieep_cfg067_s cn52xx; + struct cvmx_pcieep_cfg067_s cn52xxp1; + struct cvmx_pcieep_cfg067_s cn56xx; + struct cvmx_pcieep_cfg067_s 
cn56xxp1; +}; + +union cvmx_pcieep_cfg068 { + uint32_t u32; + struct cvmx_pcieep_cfg068_s { + uint32_t reserved_14_31:18; + uint32_t anfes:1; + uint32_t rtts:1; + uint32_t reserved_9_11:3; + uint32_t rnrs:1; + uint32_t bdllps:1; + uint32_t btlps:1; + uint32_t reserved_1_5:5; + uint32_t res:1; + } s; + struct cvmx_pcieep_cfg068_s cn52xx; + struct cvmx_pcieep_cfg068_s cn52xxp1; + struct cvmx_pcieep_cfg068_s cn56xx; + struct cvmx_pcieep_cfg068_s cn56xxp1; +}; + +union cvmx_pcieep_cfg069 { + uint32_t u32; + struct cvmx_pcieep_cfg069_s { + uint32_t reserved_14_31:18; + uint32_t anfem:1; + uint32_t rttm:1; + uint32_t reserved_9_11:3; + uint32_t rnrm:1; + uint32_t bdllpm:1; + uint32_t btlpm:1; + uint32_t reserved_1_5:5; + uint32_t rem:1; + } s; + struct cvmx_pcieep_cfg069_s cn52xx; + struct cvmx_pcieep_cfg069_s cn52xxp1; + struct cvmx_pcieep_cfg069_s cn56xx; + struct cvmx_pcieep_cfg069_s cn56xxp1; +}; + +union cvmx_pcieep_cfg070 { + uint32_t u32; + struct cvmx_pcieep_cfg070_s { + uint32_t reserved_9_31:23; + uint32_t ce:1; + uint32_t cc:1; + uint32_t ge:1; + uint32_t gc:1; + uint32_t fep:5; + } s; + struct cvmx_pcieep_cfg070_s cn52xx; + struct cvmx_pcieep_cfg070_s cn52xxp1; + struct cvmx_pcieep_cfg070_s cn56xx; + struct cvmx_pcieep_cfg070_s cn56xxp1; +}; + +union cvmx_pcieep_cfg071 { + uint32_t u32; + struct cvmx_pcieep_cfg071_s { + uint32_t dword1:32; + } s; + struct cvmx_pcieep_cfg071_s cn52xx; + struct cvmx_pcieep_cfg071_s cn52xxp1; + struct cvmx_pcieep_cfg071_s cn56xx; + struct cvmx_pcieep_cfg071_s cn56xxp1; +}; + +union cvmx_pcieep_cfg072 { + uint32_t u32; + struct cvmx_pcieep_cfg072_s { + uint32_t dword2:32; + } s; + struct cvmx_pcieep_cfg072_s cn52xx; + struct cvmx_pcieep_cfg072_s cn52xxp1; + struct cvmx_pcieep_cfg072_s cn56xx; + struct cvmx_pcieep_cfg072_s cn56xxp1; +}; + +union cvmx_pcieep_cfg073 { + uint32_t u32; + struct cvmx_pcieep_cfg073_s { + uint32_t dword3:32; + } s; + struct cvmx_pcieep_cfg073_s cn52xx; + struct cvmx_pcieep_cfg073_s cn52xxp1; + struct cvmx_pcieep_cfg073_s cn56xx; + struct cvmx_pcieep_cfg073_s cn56xxp1; +}; + +union cvmx_pcieep_cfg074 { + uint32_t u32; + struct cvmx_pcieep_cfg074_s { + uint32_t dword4:32; + } s; + struct cvmx_pcieep_cfg074_s cn52xx; + struct cvmx_pcieep_cfg074_s cn52xxp1; + struct cvmx_pcieep_cfg074_s cn56xx; + struct cvmx_pcieep_cfg074_s cn56xxp1; +}; + +union cvmx_pcieep_cfg448 { + uint32_t u32; + struct cvmx_pcieep_cfg448_s { + uint32_t rtl:16; + uint32_t rtltl:16; + } s; + struct cvmx_pcieep_cfg448_s cn52xx; + struct cvmx_pcieep_cfg448_s cn52xxp1; + struct cvmx_pcieep_cfg448_s cn56xx; + struct cvmx_pcieep_cfg448_s cn56xxp1; +}; + +union cvmx_pcieep_cfg449 { + uint32_t u32; + struct cvmx_pcieep_cfg449_s { + uint32_t omr:32; + } s; + struct cvmx_pcieep_cfg449_s cn52xx; + struct cvmx_pcieep_cfg449_s cn52xxp1; + struct cvmx_pcieep_cfg449_s cn56xx; + struct cvmx_pcieep_cfg449_s cn56xxp1; +}; + +union cvmx_pcieep_cfg450 { + uint32_t u32; + struct cvmx_pcieep_cfg450_s { + uint32_t lpec:8; + uint32_t reserved_22_23:2; + uint32_t link_state:6; + uint32_t force_link:1; + uint32_t reserved_8_14:7; + uint32_t link_num:8; + } s; + struct cvmx_pcieep_cfg450_s cn52xx; + struct cvmx_pcieep_cfg450_s cn52xxp1; + struct cvmx_pcieep_cfg450_s cn56xx; + struct cvmx_pcieep_cfg450_s cn56xxp1; +}; + +union cvmx_pcieep_cfg451 { + uint32_t u32; + struct cvmx_pcieep_cfg451_s { + uint32_t reserved_30_31:2; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t n_fts_cc:8; + uint32_t n_fts:8; + uint32_t ack_freq:8; + } s; + struct cvmx_pcieep_cfg451_s cn52xx; + struct 
cvmx_pcieep_cfg451_s cn52xxp1; + struct cvmx_pcieep_cfg451_s cn56xx; + struct cvmx_pcieep_cfg451_s cn56xxp1; +}; + +union cvmx_pcieep_cfg452 { + uint32_t u32; + struct cvmx_pcieep_cfg452_s { + uint32_t reserved_26_31:6; + uint32_t eccrc:1; + uint32_t reserved_22_24:3; + uint32_t lme:6; + uint32_t reserved_8_15:8; + uint32_t flm:1; + uint32_t reserved_6_6:1; + uint32_t dllle:1; + uint32_t reserved_4_4:1; + uint32_t ra:1; + uint32_t le:1; + uint32_t sd:1; + uint32_t omr:1; + } s; + struct cvmx_pcieep_cfg452_s cn52xx; + struct cvmx_pcieep_cfg452_s cn52xxp1; + struct cvmx_pcieep_cfg452_s cn56xx; + struct cvmx_pcieep_cfg452_s cn56xxp1; +}; + +union cvmx_pcieep_cfg453 { + uint32_t u32; + struct cvmx_pcieep_cfg453_s { + uint32_t dlld:1; + uint32_t reserved_26_30:5; + uint32_t ack_nak:1; + uint32_t fcd:1; + uint32_t ilst:24; + } s; + struct cvmx_pcieep_cfg453_s cn52xx; + struct cvmx_pcieep_cfg453_s cn52xxp1; + struct cvmx_pcieep_cfg453_s cn56xx; + struct cvmx_pcieep_cfg453_s cn56xxp1; +}; + +union cvmx_pcieep_cfg454 { + uint32_t u32; + struct cvmx_pcieep_cfg454_s { + uint32_t reserved_29_31:3; + uint32_t tmfcwt:5; + uint32_t tmanlt:5; + uint32_t tmrt:5; + uint32_t reserved_11_13:3; + uint32_t nskps:3; + uint32_t reserved_4_7:4; + uint32_t ntss:4; + } s; + struct cvmx_pcieep_cfg454_s cn52xx; + struct cvmx_pcieep_cfg454_s cn52xxp1; + struct cvmx_pcieep_cfg454_s cn56xx; + struct cvmx_pcieep_cfg454_s cn56xxp1; +}; + +union cvmx_pcieep_cfg455 { + uint32_t u32; + struct cvmx_pcieep_cfg455_s { + uint32_t m_cfg0_filt:1; + uint32_t m_io_filt:1; + uint32_t msg_ctrl:1; + uint32_t m_cpl_ecrc_filt:1; + uint32_t m_ecrc_filt:1; + uint32_t m_cpl_len_err:1; + uint32_t m_cpl_attr_err:1; + uint32_t m_cpl_tc_err:1; + uint32_t m_cpl_fun_err:1; + uint32_t m_cpl_rid_err:1; + uint32_t m_cpl_tag_err:1; + uint32_t m_lk_filt:1; + uint32_t m_cfg1_filt:1; + uint32_t m_bar_match:1; + uint32_t m_pois_filt:1; + uint32_t m_fun:1; + uint32_t dfcwt:1; + uint32_t reserved_11_14:4; + uint32_t skpiv:11; + } s; + struct cvmx_pcieep_cfg455_s cn52xx; + struct cvmx_pcieep_cfg455_s cn52xxp1; + struct cvmx_pcieep_cfg455_s cn56xx; + struct cvmx_pcieep_cfg455_s cn56xxp1; +}; + +union cvmx_pcieep_cfg456 { + uint32_t u32; + struct cvmx_pcieep_cfg456_s { + uint32_t reserved_2_31:30; + uint32_t m_vend1_drp:1; + uint32_t m_vend0_drp:1; + } s; + struct cvmx_pcieep_cfg456_s cn52xx; + struct cvmx_pcieep_cfg456_s cn52xxp1; + struct cvmx_pcieep_cfg456_s cn56xx; + struct cvmx_pcieep_cfg456_s cn56xxp1; +}; + +union cvmx_pcieep_cfg458 { + uint32_t u32; + struct cvmx_pcieep_cfg458_s { + uint32_t dbg_info_l32:32; + } s; + struct cvmx_pcieep_cfg458_s cn52xx; + struct cvmx_pcieep_cfg458_s cn52xxp1; + struct cvmx_pcieep_cfg458_s cn56xx; + struct cvmx_pcieep_cfg458_s cn56xxp1; +}; + +union cvmx_pcieep_cfg459 { + uint32_t u32; + struct cvmx_pcieep_cfg459_s { + uint32_t dbg_info_u32:32; + } s; + struct cvmx_pcieep_cfg459_s cn52xx; + struct cvmx_pcieep_cfg459_s cn52xxp1; + struct cvmx_pcieep_cfg459_s cn56xx; + struct cvmx_pcieep_cfg459_s cn56xxp1; +}; + +union cvmx_pcieep_cfg460 { + uint32_t u32; + struct cvmx_pcieep_cfg460_s { + uint32_t reserved_20_31:12; + uint32_t tphfcc:8; + uint32_t tpdfcc:12; + } s; + struct cvmx_pcieep_cfg460_s cn52xx; + struct cvmx_pcieep_cfg460_s cn52xxp1; + struct cvmx_pcieep_cfg460_s cn56xx; + struct cvmx_pcieep_cfg460_s cn56xxp1; +}; + +union cvmx_pcieep_cfg461 { + uint32_t u32; + struct cvmx_pcieep_cfg461_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pcieep_cfg461_s cn52xx; + struct 
cvmx_pcieep_cfg461_s cn52xxp1; + struct cvmx_pcieep_cfg461_s cn56xx; + struct cvmx_pcieep_cfg461_s cn56xxp1; +}; + +union cvmx_pcieep_cfg462 { + uint32_t u32; + struct cvmx_pcieep_cfg462_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pcieep_cfg462_s cn52xx; + struct cvmx_pcieep_cfg462_s cn52xxp1; + struct cvmx_pcieep_cfg462_s cn56xx; + struct cvmx_pcieep_cfg462_s cn56xxp1; +}; + +union cvmx_pcieep_cfg463 { + uint32_t u32; + struct cvmx_pcieep_cfg463_s { + uint32_t reserved_3_31:29; + uint32_t rqne:1; + uint32_t trbne:1; + uint32_t rtlpfccnr:1; + } s; + struct cvmx_pcieep_cfg463_s cn52xx; + struct cvmx_pcieep_cfg463_s cn52xxp1; + struct cvmx_pcieep_cfg463_s cn56xx; + struct cvmx_pcieep_cfg463_s cn56xxp1; +}; + +union cvmx_pcieep_cfg464 { + uint32_t u32; + struct cvmx_pcieep_cfg464_s { + uint32_t wrr_vc3:8; + uint32_t wrr_vc2:8; + uint32_t wrr_vc1:8; + uint32_t wrr_vc0:8; + } s; + struct cvmx_pcieep_cfg464_s cn52xx; + struct cvmx_pcieep_cfg464_s cn52xxp1; + struct cvmx_pcieep_cfg464_s cn56xx; + struct cvmx_pcieep_cfg464_s cn56xxp1; +}; + +union cvmx_pcieep_cfg465 { + uint32_t u32; + struct cvmx_pcieep_cfg465_s { + uint32_t wrr_vc7:8; + uint32_t wrr_vc6:8; + uint32_t wrr_vc5:8; + uint32_t wrr_vc4:8; + } s; + struct cvmx_pcieep_cfg465_s cn52xx; + struct cvmx_pcieep_cfg465_s cn52xxp1; + struct cvmx_pcieep_cfg465_s cn56xx; + struct cvmx_pcieep_cfg465_s cn56xxp1; +}; + +union cvmx_pcieep_cfg466 { + uint32_t u32; + struct cvmx_pcieep_cfg466_s { + uint32_t rx_queue_order:1; + uint32_t type_ordering:1; + uint32_t reserved_24_29:6; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pcieep_cfg466_s cn52xx; + struct cvmx_pcieep_cfg466_s cn52xxp1; + struct cvmx_pcieep_cfg466_s cn56xx; + struct cvmx_pcieep_cfg466_s cn56xxp1; +}; + +union cvmx_pcieep_cfg467 { + uint32_t u32; + struct cvmx_pcieep_cfg467_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pcieep_cfg467_s cn52xx; + struct cvmx_pcieep_cfg467_s cn52xxp1; + struct cvmx_pcieep_cfg467_s cn56xx; + struct cvmx_pcieep_cfg467_s cn56xxp1; +}; + +union cvmx_pcieep_cfg468 { + uint32_t u32; + struct cvmx_pcieep_cfg468_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pcieep_cfg468_s cn52xx; + struct cvmx_pcieep_cfg468_s cn52xxp1; + struct cvmx_pcieep_cfg468_s cn56xx; + struct cvmx_pcieep_cfg468_s cn56xxp1; +}; + +union cvmx_pcieep_cfg490 { + uint32_t u32; + struct cvmx_pcieep_cfg490_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pcieep_cfg490_s cn52xx; + struct cvmx_pcieep_cfg490_s cn52xxp1; + struct cvmx_pcieep_cfg490_s cn56xx; + struct cvmx_pcieep_cfg490_s cn56xxp1; +}; + +union cvmx_pcieep_cfg491 { + uint32_t u32; + struct cvmx_pcieep_cfg491_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pcieep_cfg491_s cn52xx; + struct cvmx_pcieep_cfg491_s cn52xxp1; + struct cvmx_pcieep_cfg491_s cn56xx; + struct cvmx_pcieep_cfg491_s cn56xxp1; +}; + +union cvmx_pcieep_cfg492 { + uint32_t u32; + struct cvmx_pcieep_cfg492_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } 
s; + struct cvmx_pcieep_cfg492_s cn52xx; + struct cvmx_pcieep_cfg492_s cn52xxp1; + struct cvmx_pcieep_cfg492_s cn56xx; + struct cvmx_pcieep_cfg492_s cn56xxp1; +}; + +union cvmx_pcieep_cfg516 { + uint32_t u32; + struct cvmx_pcieep_cfg516_s { + uint32_t phy_stat:32; + } s; + struct cvmx_pcieep_cfg516_s cn52xx; + struct cvmx_pcieep_cfg516_s cn52xxp1; + struct cvmx_pcieep_cfg516_s cn56xx; + struct cvmx_pcieep_cfg516_s cn56xxp1; +}; + +union cvmx_pcieep_cfg517 { + uint32_t u32; + struct cvmx_pcieep_cfg517_s { + uint32_t phy_ctrl:32; + } s; + struct cvmx_pcieep_cfg517_s cn52xx; + struct cvmx_pcieep_cfg517_s cn52xxp1; + struct cvmx_pcieep_cfg517_s cn56xx; + struct cvmx_pcieep_cfg517_s cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h new file mode 100644 index 00000000000..75574c91894 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h @@ -0,0 +1,1397 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. 
+ * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PCIERCX_DEFS_H__ +#define __CVMX_PCIERCX_DEFS_H__ + +#define CVMX_PCIERCX_CFG000(offset) \ + (0x0000000000000000ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG001(offset) \ + (0x0000000000000004ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG002(offset) \ + (0x0000000000000008ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG003(offset) \ + (0x000000000000000Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG004(offset) \ + (0x0000000000000010ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG005(offset) \ + (0x0000000000000014ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG006(offset) \ + (0x0000000000000018ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG007(offset) \ + (0x000000000000001Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG008(offset) \ + (0x0000000000000020ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG009(offset) \ + (0x0000000000000024ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG010(offset) \ + (0x0000000000000028ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG011(offset) \ + (0x000000000000002Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG012(offset) \ + (0x0000000000000030ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG013(offset) \ + (0x0000000000000034ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG014(offset) \ + (0x0000000000000038ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG015(offset) \ + (0x000000000000003Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG016(offset) \ + (0x0000000000000040ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG017(offset) \ + (0x0000000000000044ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG020(offset) \ + (0x0000000000000050ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG021(offset) \ + (0x0000000000000054ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG022(offset) \ + (0x0000000000000058ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG023(offset) \ + (0x000000000000005Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG028(offset) \ + (0x0000000000000070ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG029(offset) \ + (0x0000000000000074ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG030(offset) \ + (0x0000000000000078ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG031(offset) \ + (0x000000000000007Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG032(offset) \ + (0x0000000000000080ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG033(offset) \ + (0x0000000000000084ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG034(offset) \ + (0x0000000000000088ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG035(offset) \ + (0x000000000000008Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG036(offset) \ + (0x0000000000000090ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG037(offset) \ + (0x0000000000000094ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG038(offset) \ + (0x0000000000000098ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG039(offset) \ + (0x000000000000009Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG040(offset) \ + (0x00000000000000A0ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG041(offset) \ + (0x00000000000000A4ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG042(offset) \ + (0x00000000000000A8ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG064(offset) \ + (0x0000000000000100ull + (((offset) & 1) * 0)) +#define 
CVMX_PCIERCX_CFG065(offset) \ + (0x0000000000000104ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG066(offset) \ + (0x0000000000000108ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG067(offset) \ + (0x000000000000010Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG068(offset) \ + (0x0000000000000110ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG069(offset) \ + (0x0000000000000114ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG070(offset) \ + (0x0000000000000118ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG071(offset) \ + (0x000000000000011Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG072(offset) \ + (0x0000000000000120ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG073(offset) \ + (0x0000000000000124ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG074(offset) \ + (0x0000000000000128ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG075(offset) \ + (0x000000000000012Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG076(offset) \ + (0x0000000000000130ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG077(offset) \ + (0x0000000000000134ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG448(offset) \ + (0x0000000000000700ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG449(offset) \ + (0x0000000000000704ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG450(offset) \ + (0x0000000000000708ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG451(offset) \ + (0x000000000000070Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG452(offset) \ + (0x0000000000000710ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG453(offset) \ + (0x0000000000000714ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG454(offset) \ + (0x0000000000000718ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG455(offset) \ + (0x000000000000071Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG456(offset) \ + (0x0000000000000720ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG458(offset) \ + (0x0000000000000728ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG459(offset) \ + (0x000000000000072Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG460(offset) \ + (0x0000000000000730ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG461(offset) \ + (0x0000000000000734ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG462(offset) \ + (0x0000000000000738ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG463(offset) \ + (0x000000000000073Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG464(offset) \ + (0x0000000000000740ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG465(offset) \ + (0x0000000000000744ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG466(offset) \ + (0x0000000000000748ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG467(offset) \ + (0x000000000000074Cull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG468(offset) \ + (0x0000000000000750ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG490(offset) \ + (0x00000000000007A8ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG491(offset) \ + (0x00000000000007ACull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG492(offset) \ + (0x00000000000007B0ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG516(offset) \ + (0x0000000000000810ull + (((offset) & 1) * 0)) +#define CVMX_PCIERCX_CFG517(offset) \ + (0x0000000000000814ull + (((offset) & 1) * 0)) + +union cvmx_pciercx_cfg000 { + uint32_t u32; + struct cvmx_pciercx_cfg000_s { + uint32_t devid:16; + uint32_t vendid:16; + } s; + struct cvmx_pciercx_cfg000_s cn52xx; + struct cvmx_pciercx_cfg000_s cn52xxp1; + struct 
cvmx_pciercx_cfg000_s cn56xx; + struct cvmx_pciercx_cfg000_s cn56xxp1; +}; + +union cvmx_pciercx_cfg001 { + uint32_t u32; + struct cvmx_pciercx_cfg001_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t cl:1; + uint32_t i_stat:1; + uint32_t reserved_11_18:8; + uint32_t i_dis:1; + uint32_t fbbe:1; + uint32_t see:1; + uint32_t ids_wcc:1; + uint32_t per:1; + uint32_t vps:1; + uint32_t mwice:1; + uint32_t scse:1; + uint32_t me:1; + uint32_t msae:1; + uint32_t isae:1; + } s; + struct cvmx_pciercx_cfg001_s cn52xx; + struct cvmx_pciercx_cfg001_s cn52xxp1; + struct cvmx_pciercx_cfg001_s cn56xx; + struct cvmx_pciercx_cfg001_s cn56xxp1; +}; + +union cvmx_pciercx_cfg002 { + uint32_t u32; + struct cvmx_pciercx_cfg002_s { + uint32_t bcc:8; + uint32_t sc:8; + uint32_t pi:8; + uint32_t rid:8; + } s; + struct cvmx_pciercx_cfg002_s cn52xx; + struct cvmx_pciercx_cfg002_s cn52xxp1; + struct cvmx_pciercx_cfg002_s cn56xx; + struct cvmx_pciercx_cfg002_s cn56xxp1; +}; + +union cvmx_pciercx_cfg003 { + uint32_t u32; + struct cvmx_pciercx_cfg003_s { + uint32_t bist:8; + uint32_t mfd:1; + uint32_t chf:7; + uint32_t lt:8; + uint32_t cls:8; + } s; + struct cvmx_pciercx_cfg003_s cn52xx; + struct cvmx_pciercx_cfg003_s cn52xxp1; + struct cvmx_pciercx_cfg003_s cn56xx; + struct cvmx_pciercx_cfg003_s cn56xxp1; +}; + +union cvmx_pciercx_cfg004 { + uint32_t u32; + struct cvmx_pciercx_cfg004_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg004_s cn52xx; + struct cvmx_pciercx_cfg004_s cn52xxp1; + struct cvmx_pciercx_cfg004_s cn56xx; + struct cvmx_pciercx_cfg004_s cn56xxp1; +}; + +union cvmx_pciercx_cfg005 { + uint32_t u32; + struct cvmx_pciercx_cfg005_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg005_s cn52xx; + struct cvmx_pciercx_cfg005_s cn52xxp1; + struct cvmx_pciercx_cfg005_s cn56xx; + struct cvmx_pciercx_cfg005_s cn56xxp1; +}; + +union cvmx_pciercx_cfg006 { + uint32_t u32; + struct cvmx_pciercx_cfg006_s { + uint32_t slt:8; + uint32_t subbnum:8; + uint32_t sbnum:8; + uint32_t pbnum:8; + } s; + struct cvmx_pciercx_cfg006_s cn52xx; + struct cvmx_pciercx_cfg006_s cn52xxp1; + struct cvmx_pciercx_cfg006_s cn56xx; + struct cvmx_pciercx_cfg006_s cn56xxp1; +}; + +union cvmx_pciercx_cfg007 { + uint32_t u32; + struct cvmx_pciercx_cfg007_s { + uint32_t dpe:1; + uint32_t sse:1; + uint32_t rma:1; + uint32_t rta:1; + uint32_t sta:1; + uint32_t devt:2; + uint32_t mdpe:1; + uint32_t fbb:1; + uint32_t reserved_22_22:1; + uint32_t m66:1; + uint32_t reserved_16_20:5; + uint32_t lio_limi:4; + uint32_t reserved_9_11:3; + uint32_t io32b:1; + uint32_t lio_base:4; + uint32_t reserved_1_3:3; + uint32_t io32a:1; + } s; + struct cvmx_pciercx_cfg007_s cn52xx; + struct cvmx_pciercx_cfg007_s cn52xxp1; + struct cvmx_pciercx_cfg007_s cn56xx; + struct cvmx_pciercx_cfg007_s cn56xxp1; +}; + +union cvmx_pciercx_cfg008 { + uint32_t u32; + struct cvmx_pciercx_cfg008_s { + uint32_t ml_addr:12; + uint32_t reserved_16_19:4; + uint32_t mb_addr:12; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg008_s cn52xx; + struct cvmx_pciercx_cfg008_s cn52xxp1; + struct cvmx_pciercx_cfg008_s cn56xx; + struct cvmx_pciercx_cfg008_s cn56xxp1; +}; + +union cvmx_pciercx_cfg009 { + uint32_t u32; + struct cvmx_pciercx_cfg009_s { + uint32_t lmem_limit:12; + uint32_t reserved_17_19:3; + uint32_t mem64b:1; + uint32_t lmem_base:12; + uint32_t reserved_1_3:3; + uint32_t mem64a:1; + } s; + 
struct cvmx_pciercx_cfg009_s cn52xx; + struct cvmx_pciercx_cfg009_s cn52xxp1; + struct cvmx_pciercx_cfg009_s cn56xx; + struct cvmx_pciercx_cfg009_s cn56xxp1; +}; + +union cvmx_pciercx_cfg010 { + uint32_t u32; + struct cvmx_pciercx_cfg010_s { + uint32_t umem_base:32; + } s; + struct cvmx_pciercx_cfg010_s cn52xx; + struct cvmx_pciercx_cfg010_s cn52xxp1; + struct cvmx_pciercx_cfg010_s cn56xx; + struct cvmx_pciercx_cfg010_s cn56xxp1; +}; + +union cvmx_pciercx_cfg011 { + uint32_t u32; + struct cvmx_pciercx_cfg011_s { + uint32_t umem_limit:32; + } s; + struct cvmx_pciercx_cfg011_s cn52xx; + struct cvmx_pciercx_cfg011_s cn52xxp1; + struct cvmx_pciercx_cfg011_s cn56xx; + struct cvmx_pciercx_cfg011_s cn56xxp1; +}; + +union cvmx_pciercx_cfg012 { + uint32_t u32; + struct cvmx_pciercx_cfg012_s { + uint32_t uio_limit:16; + uint32_t uio_base:16; + } s; + struct cvmx_pciercx_cfg012_s cn52xx; + struct cvmx_pciercx_cfg012_s cn52xxp1; + struct cvmx_pciercx_cfg012_s cn56xx; + struct cvmx_pciercx_cfg012_s cn56xxp1; +}; + +union cvmx_pciercx_cfg013 { + uint32_t u32; + struct cvmx_pciercx_cfg013_s { + uint32_t reserved_8_31:24; + uint32_t cp:8; + } s; + struct cvmx_pciercx_cfg013_s cn52xx; + struct cvmx_pciercx_cfg013_s cn52xxp1; + struct cvmx_pciercx_cfg013_s cn56xx; + struct cvmx_pciercx_cfg013_s cn56xxp1; +}; + +union cvmx_pciercx_cfg014 { + uint32_t u32; + struct cvmx_pciercx_cfg014_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg014_s cn52xx; + struct cvmx_pciercx_cfg014_s cn52xxp1; + struct cvmx_pciercx_cfg014_s cn56xx; + struct cvmx_pciercx_cfg014_s cn56xxp1; +}; + +union cvmx_pciercx_cfg015 { + uint32_t u32; + struct cvmx_pciercx_cfg015_s { + uint32_t reserved_28_31:4; + uint32_t dtsees:1; + uint32_t dts:1; + uint32_t sdt:1; + uint32_t pdt:1; + uint32_t fbbe:1; + uint32_t sbrst:1; + uint32_t mam:1; + uint32_t vga16d:1; + uint32_t vgae:1; + uint32_t isae:1; + uint32_t see:1; + uint32_t pere:1; + uint32_t inta:8; + uint32_t il:8; + } s; + struct cvmx_pciercx_cfg015_s cn52xx; + struct cvmx_pciercx_cfg015_s cn52xxp1; + struct cvmx_pciercx_cfg015_s cn56xx; + struct cvmx_pciercx_cfg015_s cn56xxp1; +}; + +union cvmx_pciercx_cfg016 { + uint32_t u32; + struct cvmx_pciercx_cfg016_s { + uint32_t pmes:5; + uint32_t d2s:1; + uint32_t d1s:1; + uint32_t auxc:3; + uint32_t dsi:1; + uint32_t reserved_20_20:1; + uint32_t pme_clock:1; + uint32_t pmsv:3; + uint32_t ncp:8; + uint32_t pmcid:8; + } s; + struct cvmx_pciercx_cfg016_s cn52xx; + struct cvmx_pciercx_cfg016_s cn52xxp1; + struct cvmx_pciercx_cfg016_s cn56xx; + struct cvmx_pciercx_cfg016_s cn56xxp1; +}; + +union cvmx_pciercx_cfg017 { + uint32_t u32; + struct cvmx_pciercx_cfg017_s { + uint32_t pmdia:8; + uint32_t bpccee:1; + uint32_t bd3h:1; + uint32_t reserved_16_21:6; + uint32_t pmess:1; + uint32_t pmedsia:2; + uint32_t pmds:4; + uint32_t pmeens:1; + uint32_t reserved_4_7:4; + uint32_t nsr:1; + uint32_t reserved_2_2:1; + uint32_t ps:2; + } s; + struct cvmx_pciercx_cfg017_s cn52xx; + struct cvmx_pciercx_cfg017_s cn52xxp1; + struct cvmx_pciercx_cfg017_s cn56xx; + struct cvmx_pciercx_cfg017_s cn56xxp1; +}; + +union cvmx_pciercx_cfg020 { + uint32_t u32; + struct cvmx_pciercx_cfg020_s { + uint32_t reserved_24_31:8; + uint32_t m64:1; + uint32_t mme:3; + uint32_t mmc:3; + uint32_t msien:1; + uint32_t ncp:8; + uint32_t msicid:8; + } s; + struct cvmx_pciercx_cfg020_s cn52xx; + struct cvmx_pciercx_cfg020_s cn52xxp1; + struct cvmx_pciercx_cfg020_s cn56xx; + struct cvmx_pciercx_cfg020_s cn56xxp1; +}; + +union cvmx_pciercx_cfg021 { + uint32_t u32; + struct 
cvmx_pciercx_cfg021_s { + uint32_t lmsi:30; + uint32_t reserved_0_1:2; + } s; + struct cvmx_pciercx_cfg021_s cn52xx; + struct cvmx_pciercx_cfg021_s cn52xxp1; + struct cvmx_pciercx_cfg021_s cn56xx; + struct cvmx_pciercx_cfg021_s cn56xxp1; +}; + +union cvmx_pciercx_cfg022 { + uint32_t u32; + struct cvmx_pciercx_cfg022_s { + uint32_t umsi:32; + } s; + struct cvmx_pciercx_cfg022_s cn52xx; + struct cvmx_pciercx_cfg022_s cn52xxp1; + struct cvmx_pciercx_cfg022_s cn56xx; + struct cvmx_pciercx_cfg022_s cn56xxp1; +}; + +union cvmx_pciercx_cfg023 { + uint32_t u32; + struct cvmx_pciercx_cfg023_s { + uint32_t reserved_16_31:16; + uint32_t msimd:16; + } s; + struct cvmx_pciercx_cfg023_s cn52xx; + struct cvmx_pciercx_cfg023_s cn52xxp1; + struct cvmx_pciercx_cfg023_s cn56xx; + struct cvmx_pciercx_cfg023_s cn56xxp1; +}; + +union cvmx_pciercx_cfg028 { + uint32_t u32; + struct cvmx_pciercx_cfg028_s { + uint32_t reserved_30_31:2; + uint32_t imn:5; + uint32_t si:1; + uint32_t dpt:4; + uint32_t pciecv:4; + uint32_t ncp:8; + uint32_t pcieid:8; + } s; + struct cvmx_pciercx_cfg028_s cn52xx; + struct cvmx_pciercx_cfg028_s cn52xxp1; + struct cvmx_pciercx_cfg028_s cn56xx; + struct cvmx_pciercx_cfg028_s cn56xxp1; +}; + +union cvmx_pciercx_cfg029 { + uint32_t u32; + struct cvmx_pciercx_cfg029_s { + uint32_t reserved_28_31:4; + uint32_t cspls:2; + uint32_t csplv:8; + uint32_t reserved_16_17:2; + uint32_t rber:1; + uint32_t reserved_12_14:3; + uint32_t el1al:3; + uint32_t el0al:3; + uint32_t etfs:1; + uint32_t pfs:2; + uint32_t mpss:3; + } s; + struct cvmx_pciercx_cfg029_s cn52xx; + struct cvmx_pciercx_cfg029_s cn52xxp1; + struct cvmx_pciercx_cfg029_s cn56xx; + struct cvmx_pciercx_cfg029_s cn56xxp1; +}; + +union cvmx_pciercx_cfg030 { + uint32_t u32; + struct cvmx_pciercx_cfg030_s { + uint32_t reserved_22_31:10; + uint32_t tp:1; + uint32_t ap_d:1; + uint32_t ur_d:1; + uint32_t fe_d:1; + uint32_t nfe_d:1; + uint32_t ce_d:1; + uint32_t reserved_15_15:1; + uint32_t mrrs:3; + uint32_t ns_en:1; + uint32_t ap_en:1; + uint32_t pf_en:1; + uint32_t etf_en:1; + uint32_t mps:3; + uint32_t ro_en:1; + uint32_t ur_en:1; + uint32_t fe_en:1; + uint32_t nfe_en:1; + uint32_t ce_en:1; + } s; + struct cvmx_pciercx_cfg030_s cn52xx; + struct cvmx_pciercx_cfg030_s cn52xxp1; + struct cvmx_pciercx_cfg030_s cn56xx; + struct cvmx_pciercx_cfg030_s cn56xxp1; +}; + +union cvmx_pciercx_cfg031 { + uint32_t u32; + struct cvmx_pciercx_cfg031_s { + uint32_t pnum:8; + uint32_t reserved_22_23:2; + uint32_t lbnc:1; + uint32_t dllarc:1; + uint32_t sderc:1; + uint32_t cpm:1; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t aslpms:2; + uint32_t mlw:6; + uint32_t mls:4; + } s; + struct cvmx_pciercx_cfg031_s cn52xx; + struct cvmx_pciercx_cfg031_s cn52xxp1; + struct cvmx_pciercx_cfg031_s cn56xx; + struct cvmx_pciercx_cfg031_s cn56xxp1; +}; + +union cvmx_pciercx_cfg032 { + uint32_t u32; + struct cvmx_pciercx_cfg032_s { + uint32_t lab:1; + uint32_t lbm:1; + uint32_t dlla:1; + uint32_t scc:1; + uint32_t lt:1; + uint32_t reserved_26_26:1; + uint32_t nlw:6; + uint32_t ls:4; + uint32_t reserved_12_15:4; + uint32_t lab_int_enb:1; + uint32_t lbm_int_enb:1; + uint32_t hawd:1; + uint32_t ecpm:1; + uint32_t es:1; + uint32_t ccc:1; + uint32_t rl:1; + uint32_t ld:1; + uint32_t rcb:1; + uint32_t reserved_2_2:1; + uint32_t aslpc:2; + } s; + struct cvmx_pciercx_cfg032_s cn52xx; + struct cvmx_pciercx_cfg032_s cn52xxp1; + struct cvmx_pciercx_cfg032_s cn56xx; + struct cvmx_pciercx_cfg032_s cn56xxp1; +}; + +union cvmx_pciercx_cfg033 { + uint32_t u32; + struct cvmx_pciercx_cfg033_s { + 
uint32_t ps_num:13; + uint32_t nccs:1; + uint32_t emip:1; + uint32_t sp_ls:2; + uint32_t sp_lv:8; + uint32_t hp_c:1; + uint32_t hp_s:1; + uint32_t pip:1; + uint32_t aip:1; + uint32_t mrlsp:1; + uint32_t pcp:1; + uint32_t abp:1; + } s; + struct cvmx_pciercx_cfg033_s cn52xx; + struct cvmx_pciercx_cfg033_s cn52xxp1; + struct cvmx_pciercx_cfg033_s cn56xx; + struct cvmx_pciercx_cfg033_s cn56xxp1; +}; + +union cvmx_pciercx_cfg034 { + uint32_t u32; + struct cvmx_pciercx_cfg034_s { + uint32_t reserved_25_31:7; + uint32_t dlls_c:1; + uint32_t emis:1; + uint32_t pds:1; + uint32_t mrlss:1; + uint32_t ccint_d:1; + uint32_t pd_c:1; + uint32_t mrls_c:1; + uint32_t pf_d:1; + uint32_t abp_d:1; + uint32_t reserved_13_15:3; + uint32_t dlls_en:1; + uint32_t emic:1; + uint32_t pcc:1; + uint32_t pic:2; + uint32_t aic:2; + uint32_t hpint_en:1; + uint32_t ccint_en:1; + uint32_t pd_en:1; + uint32_t mrls_en:1; + uint32_t pf_en:1; + uint32_t abp_en:1; + } s; + struct cvmx_pciercx_cfg034_s cn52xx; + struct cvmx_pciercx_cfg034_s cn52xxp1; + struct cvmx_pciercx_cfg034_s cn56xx; + struct cvmx_pciercx_cfg034_s cn56xxp1; +}; + +union cvmx_pciercx_cfg035 { + uint32_t u32; + struct cvmx_pciercx_cfg035_s { + uint32_t reserved_17_31:15; + uint32_t crssv:1; + uint32_t reserved_5_15:11; + uint32_t crssve:1; + uint32_t pmeie:1; + uint32_t sefee:1; + uint32_t senfee:1; + uint32_t secee:1; + } s; + struct cvmx_pciercx_cfg035_s cn52xx; + struct cvmx_pciercx_cfg035_s cn52xxp1; + struct cvmx_pciercx_cfg035_s cn56xx; + struct cvmx_pciercx_cfg035_s cn56xxp1; +}; + +union cvmx_pciercx_cfg036 { + uint32_t u32; + struct cvmx_pciercx_cfg036_s { + uint32_t reserved_18_31:14; + uint32_t pme_pend:1; + uint32_t pme_stat:1; + uint32_t pme_rid:16; + } s; + struct cvmx_pciercx_cfg036_s cn52xx; + struct cvmx_pciercx_cfg036_s cn52xxp1; + struct cvmx_pciercx_cfg036_s cn56xx; + struct cvmx_pciercx_cfg036_s cn56xxp1; +}; + +union cvmx_pciercx_cfg037 { + uint32_t u32; + struct cvmx_pciercx_cfg037_s { + uint32_t reserved_5_31:27; + uint32_t ctds:1; + uint32_t ctrs:4; + } s; + struct cvmx_pciercx_cfg037_s cn52xx; + struct cvmx_pciercx_cfg037_s cn52xxp1; + struct cvmx_pciercx_cfg037_s cn56xx; + struct cvmx_pciercx_cfg037_s cn56xxp1; +}; + +union cvmx_pciercx_cfg038 { + uint32_t u32; + struct cvmx_pciercx_cfg038_s { + uint32_t reserved_5_31:27; + uint32_t ctd:1; + uint32_t ctv:4; + } s; + struct cvmx_pciercx_cfg038_s cn52xx; + struct cvmx_pciercx_cfg038_s cn52xxp1; + struct cvmx_pciercx_cfg038_s cn56xx; + struct cvmx_pciercx_cfg038_s cn56xxp1; +}; + +union cvmx_pciercx_cfg039 { + uint32_t u32; + struct cvmx_pciercx_cfg039_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg039_s cn52xx; + struct cvmx_pciercx_cfg039_s cn52xxp1; + struct cvmx_pciercx_cfg039_s cn56xx; + struct cvmx_pciercx_cfg039_s cn56xxp1; +}; + +union cvmx_pciercx_cfg040 { + uint32_t u32; + struct cvmx_pciercx_cfg040_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg040_s cn52xx; + struct cvmx_pciercx_cfg040_s cn52xxp1; + struct cvmx_pciercx_cfg040_s cn56xx; + struct cvmx_pciercx_cfg040_s cn56xxp1; +}; + +union cvmx_pciercx_cfg041 { + uint32_t u32; + struct cvmx_pciercx_cfg041_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg041_s cn52xx; + struct cvmx_pciercx_cfg041_s cn52xxp1; + struct cvmx_pciercx_cfg041_s cn56xx; + struct cvmx_pciercx_cfg041_s cn56xxp1; +}; + +union cvmx_pciercx_cfg042 { + uint32_t u32; + struct cvmx_pciercx_cfg042_s { + uint32_t reserved_0_31:32; + } s; + struct cvmx_pciercx_cfg042_s cn52xx; + struct cvmx_pciercx_cfg042_s 
cn52xxp1; + struct cvmx_pciercx_cfg042_s cn56xx; + struct cvmx_pciercx_cfg042_s cn56xxp1; +}; + +union cvmx_pciercx_cfg064 { + uint32_t u32; + struct cvmx_pciercx_cfg064_s { + uint32_t nco:12; + uint32_t cv:4; + uint32_t pcieec:16; + } s; + struct cvmx_pciercx_cfg064_s cn52xx; + struct cvmx_pciercx_cfg064_s cn52xxp1; + struct cvmx_pciercx_cfg064_s cn56xx; + struct cvmx_pciercx_cfg064_s cn56xxp1; +}; + +union cvmx_pciercx_cfg065 { + uint32_t u32; + struct cvmx_pciercx_cfg065_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg065_s cn52xx; + struct cvmx_pciercx_cfg065_s cn52xxp1; + struct cvmx_pciercx_cfg065_s cn56xx; + struct cvmx_pciercx_cfg065_s cn56xxp1; +}; + +union cvmx_pciercx_cfg066 { + uint32_t u32; + struct cvmx_pciercx_cfg066_s { + uint32_t reserved_21_31:11; + uint32_t urem:1; + uint32_t ecrcem:1; + uint32_t mtlpm:1; + uint32_t rom:1; + uint32_t ucm:1; + uint32_t cam:1; + uint32_t ctm:1; + uint32_t fcpem:1; + uint32_t ptlpm:1; + uint32_t reserved_6_11:6; + uint32_t sdem:1; + uint32_t dlpem:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg066_s cn52xx; + struct cvmx_pciercx_cfg066_s cn52xxp1; + struct cvmx_pciercx_cfg066_s cn56xx; + struct cvmx_pciercx_cfg066_s cn56xxp1; +}; + +union cvmx_pciercx_cfg067 { + uint32_t u32; + struct cvmx_pciercx_cfg067_s { + uint32_t reserved_21_31:11; + uint32_t ures:1; + uint32_t ecrces:1; + uint32_t mtlps:1; + uint32_t ros:1; + uint32_t ucs:1; + uint32_t cas:1; + uint32_t cts:1; + uint32_t fcpes:1; + uint32_t ptlps:1; + uint32_t reserved_6_11:6; + uint32_t sdes:1; + uint32_t dlpes:1; + uint32_t reserved_0_3:4; + } s; + struct cvmx_pciercx_cfg067_s cn52xx; + struct cvmx_pciercx_cfg067_s cn52xxp1; + struct cvmx_pciercx_cfg067_s cn56xx; + struct cvmx_pciercx_cfg067_s cn56xxp1; +}; + +union cvmx_pciercx_cfg068 { + uint32_t u32; + struct cvmx_pciercx_cfg068_s { + uint32_t reserved_14_31:18; + uint32_t anfes:1; + uint32_t rtts:1; + uint32_t reserved_9_11:3; + uint32_t rnrs:1; + uint32_t bdllps:1; + uint32_t btlps:1; + uint32_t reserved_1_5:5; + uint32_t res:1; + } s; + struct cvmx_pciercx_cfg068_s cn52xx; + struct cvmx_pciercx_cfg068_s cn52xxp1; + struct cvmx_pciercx_cfg068_s cn56xx; + struct cvmx_pciercx_cfg068_s cn56xxp1; +}; + +union cvmx_pciercx_cfg069 { + uint32_t u32; + struct cvmx_pciercx_cfg069_s { + uint32_t reserved_14_31:18; + uint32_t anfem:1; + uint32_t rttm:1; + uint32_t reserved_9_11:3; + uint32_t rnrm:1; + uint32_t bdllpm:1; + uint32_t btlpm:1; + uint32_t reserved_1_5:5; + uint32_t rem:1; + } s; + struct cvmx_pciercx_cfg069_s cn52xx; + struct cvmx_pciercx_cfg069_s cn52xxp1; + struct cvmx_pciercx_cfg069_s cn56xx; + struct cvmx_pciercx_cfg069_s cn56xxp1; +}; + +union cvmx_pciercx_cfg070 { + uint32_t u32; + struct cvmx_pciercx_cfg070_s { + uint32_t reserved_9_31:23; + uint32_t ce:1; + uint32_t cc:1; + uint32_t ge:1; + uint32_t gc:1; + uint32_t fep:5; + } s; + struct cvmx_pciercx_cfg070_s cn52xx; + struct cvmx_pciercx_cfg070_s cn52xxp1; + struct cvmx_pciercx_cfg070_s cn56xx; + struct cvmx_pciercx_cfg070_s cn56xxp1; +}; + +union cvmx_pciercx_cfg071 { + uint32_t u32; + struct cvmx_pciercx_cfg071_s { + uint32_t dword1:32; + } s; + struct cvmx_pciercx_cfg071_s cn52xx; + struct cvmx_pciercx_cfg071_s cn52xxp1; + struct cvmx_pciercx_cfg071_s cn56xx; + struct 
cvmx_pciercx_cfg071_s cn56xxp1; +}; + +union cvmx_pciercx_cfg072 { + uint32_t u32; + struct cvmx_pciercx_cfg072_s { + uint32_t dword2:32; + } s; + struct cvmx_pciercx_cfg072_s cn52xx; + struct cvmx_pciercx_cfg072_s cn52xxp1; + struct cvmx_pciercx_cfg072_s cn56xx; + struct cvmx_pciercx_cfg072_s cn56xxp1; +}; + +union cvmx_pciercx_cfg073 { + uint32_t u32; + struct cvmx_pciercx_cfg073_s { + uint32_t dword3:32; + } s; + struct cvmx_pciercx_cfg073_s cn52xx; + struct cvmx_pciercx_cfg073_s cn52xxp1; + struct cvmx_pciercx_cfg073_s cn56xx; + struct cvmx_pciercx_cfg073_s cn56xxp1; +}; + +union cvmx_pciercx_cfg074 { + uint32_t u32; + struct cvmx_pciercx_cfg074_s { + uint32_t dword4:32; + } s; + struct cvmx_pciercx_cfg074_s cn52xx; + struct cvmx_pciercx_cfg074_s cn52xxp1; + struct cvmx_pciercx_cfg074_s cn56xx; + struct cvmx_pciercx_cfg074_s cn56xxp1; +}; + +union cvmx_pciercx_cfg075 { + uint32_t u32; + struct cvmx_pciercx_cfg075_s { + uint32_t reserved_3_31:29; + uint32_t fere:1; + uint32_t nfere:1; + uint32_t cere:1; + } s; + struct cvmx_pciercx_cfg075_s cn52xx; + struct cvmx_pciercx_cfg075_s cn52xxp1; + struct cvmx_pciercx_cfg075_s cn56xx; + struct cvmx_pciercx_cfg075_s cn56xxp1; +}; + +union cvmx_pciercx_cfg076 { + uint32_t u32; + struct cvmx_pciercx_cfg076_s { + uint32_t aeimn:5; + uint32_t reserved_7_26:20; + uint32_t femr:1; + uint32_t nfemr:1; + uint32_t fuf:1; + uint32_t multi_efnfr:1; + uint32_t efnfr:1; + uint32_t multi_ecr:1; + uint32_t ecr:1; + } s; + struct cvmx_pciercx_cfg076_s cn52xx; + struct cvmx_pciercx_cfg076_s cn52xxp1; + struct cvmx_pciercx_cfg076_s cn56xx; + struct cvmx_pciercx_cfg076_s cn56xxp1; +}; + +union cvmx_pciercx_cfg077 { + uint32_t u32; + struct cvmx_pciercx_cfg077_s { + uint32_t efnfsi:16; + uint32_t ecsi:16; + } s; + struct cvmx_pciercx_cfg077_s cn52xx; + struct cvmx_pciercx_cfg077_s cn52xxp1; + struct cvmx_pciercx_cfg077_s cn56xx; + struct cvmx_pciercx_cfg077_s cn56xxp1; +}; + +union cvmx_pciercx_cfg448 { + uint32_t u32; + struct cvmx_pciercx_cfg448_s { + uint32_t rtl:16; + uint32_t rtltl:16; + } s; + struct cvmx_pciercx_cfg448_s cn52xx; + struct cvmx_pciercx_cfg448_s cn52xxp1; + struct cvmx_pciercx_cfg448_s cn56xx; + struct cvmx_pciercx_cfg448_s cn56xxp1; +}; + +union cvmx_pciercx_cfg449 { + uint32_t u32; + struct cvmx_pciercx_cfg449_s { + uint32_t omr:32; + } s; + struct cvmx_pciercx_cfg449_s cn52xx; + struct cvmx_pciercx_cfg449_s cn52xxp1; + struct cvmx_pciercx_cfg449_s cn56xx; + struct cvmx_pciercx_cfg449_s cn56xxp1; +}; + +union cvmx_pciercx_cfg450 { + uint32_t u32; + struct cvmx_pciercx_cfg450_s { + uint32_t lpec:8; + uint32_t reserved_22_23:2; + uint32_t link_state:6; + uint32_t force_link:1; + uint32_t reserved_8_14:7; + uint32_t link_num:8; + } s; + struct cvmx_pciercx_cfg450_s cn52xx; + struct cvmx_pciercx_cfg450_s cn52xxp1; + struct cvmx_pciercx_cfg450_s cn56xx; + struct cvmx_pciercx_cfg450_s cn56xxp1; +}; + +union cvmx_pciercx_cfg451 { + uint32_t u32; + struct cvmx_pciercx_cfg451_s { + uint32_t reserved_30_31:2; + uint32_t l1el:3; + uint32_t l0el:3; + uint32_t n_fts_cc:8; + uint32_t n_fts:8; + uint32_t ack_freq:8; + } s; + struct cvmx_pciercx_cfg451_s cn52xx; + struct cvmx_pciercx_cfg451_s cn52xxp1; + struct cvmx_pciercx_cfg451_s cn56xx; + struct cvmx_pciercx_cfg451_s cn56xxp1; +}; + +union cvmx_pciercx_cfg452 { + uint32_t u32; + struct cvmx_pciercx_cfg452_s { + uint32_t reserved_26_31:6; + uint32_t eccrc:1; + uint32_t reserved_22_24:3; + uint32_t lme:6; + uint32_t reserved_8_15:8; + uint32_t flm:1; + uint32_t reserved_6_6:1; + uint32_t dllle:1; + 
uint32_t reserved_4_4:1; + uint32_t ra:1; + uint32_t le:1; + uint32_t sd:1; + uint32_t omr:1; + } s; + struct cvmx_pciercx_cfg452_s cn52xx; + struct cvmx_pciercx_cfg452_s cn52xxp1; + struct cvmx_pciercx_cfg452_s cn56xx; + struct cvmx_pciercx_cfg452_s cn56xxp1; +}; + +union cvmx_pciercx_cfg453 { + uint32_t u32; + struct cvmx_pciercx_cfg453_s { + uint32_t dlld:1; + uint32_t reserved_26_30:5; + uint32_t ack_nak:1; + uint32_t fcd:1; + uint32_t ilst:24; + } s; + struct cvmx_pciercx_cfg453_s cn52xx; + struct cvmx_pciercx_cfg453_s cn52xxp1; + struct cvmx_pciercx_cfg453_s cn56xx; + struct cvmx_pciercx_cfg453_s cn56xxp1; +}; + +union cvmx_pciercx_cfg454 { + uint32_t u32; + struct cvmx_pciercx_cfg454_s { + uint32_t reserved_29_31:3; + uint32_t tmfcwt:5; + uint32_t tmanlt:5; + uint32_t tmrt:5; + uint32_t reserved_11_13:3; + uint32_t nskps:3; + uint32_t reserved_4_7:4; + uint32_t ntss:4; + } s; + struct cvmx_pciercx_cfg454_s cn52xx; + struct cvmx_pciercx_cfg454_s cn52xxp1; + struct cvmx_pciercx_cfg454_s cn56xx; + struct cvmx_pciercx_cfg454_s cn56xxp1; +}; + +union cvmx_pciercx_cfg455 { + uint32_t u32; + struct cvmx_pciercx_cfg455_s { + uint32_t m_cfg0_filt:1; + uint32_t m_io_filt:1; + uint32_t msg_ctrl:1; + uint32_t m_cpl_ecrc_filt:1; + uint32_t m_ecrc_filt:1; + uint32_t m_cpl_len_err:1; + uint32_t m_cpl_attr_err:1; + uint32_t m_cpl_tc_err:1; + uint32_t m_cpl_fun_err:1; + uint32_t m_cpl_rid_err:1; + uint32_t m_cpl_tag_err:1; + uint32_t m_lk_filt:1; + uint32_t m_cfg1_filt:1; + uint32_t m_bar_match:1; + uint32_t m_pois_filt:1; + uint32_t m_fun:1; + uint32_t dfcwt:1; + uint32_t reserved_11_14:4; + uint32_t skpiv:11; + } s; + struct cvmx_pciercx_cfg455_s cn52xx; + struct cvmx_pciercx_cfg455_s cn52xxp1; + struct cvmx_pciercx_cfg455_s cn56xx; + struct cvmx_pciercx_cfg455_s cn56xxp1; +}; + +union cvmx_pciercx_cfg456 { + uint32_t u32; + struct cvmx_pciercx_cfg456_s { + uint32_t reserved_2_31:30; + uint32_t m_vend1_drp:1; + uint32_t m_vend0_drp:1; + } s; + struct cvmx_pciercx_cfg456_s cn52xx; + struct cvmx_pciercx_cfg456_s cn52xxp1; + struct cvmx_pciercx_cfg456_s cn56xx; + struct cvmx_pciercx_cfg456_s cn56xxp1; +}; + +union cvmx_pciercx_cfg458 { + uint32_t u32; + struct cvmx_pciercx_cfg458_s { + uint32_t dbg_info_l32:32; + } s; + struct cvmx_pciercx_cfg458_s cn52xx; + struct cvmx_pciercx_cfg458_s cn52xxp1; + struct cvmx_pciercx_cfg458_s cn56xx; + struct cvmx_pciercx_cfg458_s cn56xxp1; +}; + +union cvmx_pciercx_cfg459 { + uint32_t u32; + struct cvmx_pciercx_cfg459_s { + uint32_t dbg_info_u32:32; + } s; + struct cvmx_pciercx_cfg459_s cn52xx; + struct cvmx_pciercx_cfg459_s cn52xxp1; + struct cvmx_pciercx_cfg459_s cn56xx; + struct cvmx_pciercx_cfg459_s cn56xxp1; +}; + +union cvmx_pciercx_cfg460 { + uint32_t u32; + struct cvmx_pciercx_cfg460_s { + uint32_t reserved_20_31:12; + uint32_t tphfcc:8; + uint32_t tpdfcc:12; + } s; + struct cvmx_pciercx_cfg460_s cn52xx; + struct cvmx_pciercx_cfg460_s cn52xxp1; + struct cvmx_pciercx_cfg460_s cn56xx; + struct cvmx_pciercx_cfg460_s cn56xxp1; +}; + +union cvmx_pciercx_cfg461 { + uint32_t u32; + struct cvmx_pciercx_cfg461_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pciercx_cfg461_s cn52xx; + struct cvmx_pciercx_cfg461_s cn52xxp1; + struct cvmx_pciercx_cfg461_s cn56xx; + struct cvmx_pciercx_cfg461_s cn56xxp1; +}; + +union cvmx_pciercx_cfg462 { + uint32_t u32; + struct cvmx_pciercx_cfg462_s { + uint32_t reserved_20_31:12; + uint32_t tchfcc:8; + uint32_t tcdfcc:12; + } s; + struct cvmx_pciercx_cfg462_s cn52xx; + struct 
cvmx_pciercx_cfg462_s cn52xxp1; + struct cvmx_pciercx_cfg462_s cn56xx; + struct cvmx_pciercx_cfg462_s cn56xxp1; +}; + +union cvmx_pciercx_cfg463 { + uint32_t u32; + struct cvmx_pciercx_cfg463_s { + uint32_t reserved_3_31:29; + uint32_t rqne:1; + uint32_t trbne:1; + uint32_t rtlpfccnr:1; + } s; + struct cvmx_pciercx_cfg463_s cn52xx; + struct cvmx_pciercx_cfg463_s cn52xxp1; + struct cvmx_pciercx_cfg463_s cn56xx; + struct cvmx_pciercx_cfg463_s cn56xxp1; +}; + +union cvmx_pciercx_cfg464 { + uint32_t u32; + struct cvmx_pciercx_cfg464_s { + uint32_t wrr_vc3:8; + uint32_t wrr_vc2:8; + uint32_t wrr_vc1:8; + uint32_t wrr_vc0:8; + } s; + struct cvmx_pciercx_cfg464_s cn52xx; + struct cvmx_pciercx_cfg464_s cn52xxp1; + struct cvmx_pciercx_cfg464_s cn56xx; + struct cvmx_pciercx_cfg464_s cn56xxp1; +}; + +union cvmx_pciercx_cfg465 { + uint32_t u32; + struct cvmx_pciercx_cfg465_s { + uint32_t wrr_vc7:8; + uint32_t wrr_vc6:8; + uint32_t wrr_vc5:8; + uint32_t wrr_vc4:8; + } s; + struct cvmx_pciercx_cfg465_s cn52xx; + struct cvmx_pciercx_cfg465_s cn52xxp1; + struct cvmx_pciercx_cfg465_s cn56xx; + struct cvmx_pciercx_cfg465_s cn56xxp1; +}; + +union cvmx_pciercx_cfg466 { + uint32_t u32; + struct cvmx_pciercx_cfg466_s { + uint32_t rx_queue_order:1; + uint32_t type_ordering:1; + uint32_t reserved_24_29:6; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pciercx_cfg466_s cn52xx; + struct cvmx_pciercx_cfg466_s cn52xxp1; + struct cvmx_pciercx_cfg466_s cn56xx; + struct cvmx_pciercx_cfg466_s cn56xxp1; +}; + +union cvmx_pciercx_cfg467 { + uint32_t u32; + struct cvmx_pciercx_cfg467_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pciercx_cfg467_s cn52xx; + struct cvmx_pciercx_cfg467_s cn52xxp1; + struct cvmx_pciercx_cfg467_s cn56xx; + struct cvmx_pciercx_cfg467_s cn56xxp1; +}; + +union cvmx_pciercx_cfg468 { + uint32_t u32; + struct cvmx_pciercx_cfg468_s { + uint32_t reserved_24_31:8; + uint32_t queue_mode:3; + uint32_t reserved_20_20:1; + uint32_t header_credits:8; + uint32_t data_credits:12; + } s; + struct cvmx_pciercx_cfg468_s cn52xx; + struct cvmx_pciercx_cfg468_s cn52xxp1; + struct cvmx_pciercx_cfg468_s cn56xx; + struct cvmx_pciercx_cfg468_s cn56xxp1; +}; + +union cvmx_pciercx_cfg490 { + uint32_t u32; + struct cvmx_pciercx_cfg490_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pciercx_cfg490_s cn52xx; + struct cvmx_pciercx_cfg490_s cn52xxp1; + struct cvmx_pciercx_cfg490_s cn56xx; + struct cvmx_pciercx_cfg490_s cn56xxp1; +}; + +union cvmx_pciercx_cfg491 { + uint32_t u32; + struct cvmx_pciercx_cfg491_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pciercx_cfg491_s cn52xx; + struct cvmx_pciercx_cfg491_s cn52xxp1; + struct cvmx_pciercx_cfg491_s cn56xx; + struct cvmx_pciercx_cfg491_s cn56xxp1; +}; + +union cvmx_pciercx_cfg492 { + uint32_t u32; + struct cvmx_pciercx_cfg492_s { + uint32_t reserved_26_31:6; + uint32_t header_depth:10; + uint32_t reserved_14_15:2; + uint32_t data_depth:14; + } s; + struct cvmx_pciercx_cfg492_s cn52xx; + struct cvmx_pciercx_cfg492_s cn52xxp1; + struct cvmx_pciercx_cfg492_s cn56xx; + struct cvmx_pciercx_cfg492_s cn56xxp1; +}; + +union cvmx_pciercx_cfg516 { + uint32_t u32; + struct cvmx_pciercx_cfg516_s { + uint32_t 
phy_stat:32; + } s; + struct cvmx_pciercx_cfg516_s cn52xx; + struct cvmx_pciercx_cfg516_s cn52xxp1; + struct cvmx_pciercx_cfg516_s cn56xx; + struct cvmx_pciercx_cfg516_s cn56xxp1; +}; + +union cvmx_pciercx_cfg517 { + uint32_t u32; + struct cvmx_pciercx_cfg517_s { + uint32_t phy_ctrl:32; + } s; + struct cvmx_pciercx_cfg517_s cn52xx; + struct cvmx_pciercx_cfg517_s cn52xxp1; + struct cvmx_pciercx_cfg517_s cn56xx; + struct cvmx_pciercx_cfg517_s cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h new file mode 100644 index 00000000000..f40cfaf8445 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h @@ -0,0 +1,410 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_PESCX_DEFS_H__ +#define __CVMX_PESCX_DEFS_H__ + +#define CVMX_PESCX_BIST_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000018ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_BIST_STATUS2(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000418ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CFG_RD(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000030ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CFG_WR(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000028ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CPL_LUT_VALID(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000098ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CTL_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000000ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_CTL_STATUS2(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000400ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_DBG_INFO(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000008ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_DBG_INFO_EN(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C80000A0ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_DIAG_STATUS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000020ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2N_BAR0_START(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000080ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2N_BAR1_START(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000088ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2N_BAR2_START(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000090ull + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2P_BARX_END(offset, 
block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000048ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_P2P_BARX_START(offset, block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000040ull + (((offset) & 3) * 16) + (((block_id) & 1) * 0x8000000ull)) +#define CVMX_PESCX_TLP_CREDITS(block_id) \ + CVMX_ADD_IO_SEG(0x00011800C8000038ull + (((block_id) & 1) * 0x8000000ull)) + +union cvmx_pescx_bist_status { + uint64_t u64; + struct cvmx_pescx_bist_status_s { + uint64_t reserved_13_63:51; + uint64_t rqdata5:1; + uint64_t ctlp_or:1; + uint64_t ntlp_or:1; + uint64_t ptlp_or:1; + uint64_t retry:1; + uint64_t rqdata0:1; + uint64_t rqdata1:1; + uint64_t rqdata2:1; + uint64_t rqdata3:1; + uint64_t rqdata4:1; + uint64_t rqhdr1:1; + uint64_t rqhdr0:1; + uint64_t sot:1; + } s; + struct cvmx_pescx_bist_status_s cn52xx; + struct cvmx_pescx_bist_status_cn52xxp1 { + uint64_t reserved_12_63:52; + uint64_t ctlp_or:1; + uint64_t ntlp_or:1; + uint64_t ptlp_or:1; + uint64_t retry:1; + uint64_t rqdata0:1; + uint64_t rqdata1:1; + uint64_t rqdata2:1; + uint64_t rqdata3:1; + uint64_t rqdata4:1; + uint64_t rqhdr1:1; + uint64_t rqhdr0:1; + uint64_t sot:1; + } cn52xxp1; + struct cvmx_pescx_bist_status_s cn56xx; + struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1; +}; + +union cvmx_pescx_bist_status2 { + uint64_t u64; + struct cvmx_pescx_bist_status2_s { + uint64_t reserved_14_63:50; + uint64_t cto_p2e:1; + uint64_t e2p_cpl:1; + uint64_t e2p_n:1; + uint64_t e2p_p:1; + uint64_t e2p_rsl:1; + uint64_t dbg_p2e:1; + uint64_t peai_p2e:1; + uint64_t rsl_p2e:1; + uint64_t pef_tpf1:1; + uint64_t pef_tpf0:1; + uint64_t pef_tnf:1; + uint64_t pef_tcf1:1; + uint64_t pef_tc0:1; + uint64_t ppf:1; + } s; + struct cvmx_pescx_bist_status2_s cn52xx; + struct cvmx_pescx_bist_status2_s cn52xxp1; + struct cvmx_pescx_bist_status2_s cn56xx; + struct cvmx_pescx_bist_status2_s cn56xxp1; +}; + +union cvmx_pescx_cfg_rd { + uint64_t u64; + struct cvmx_pescx_cfg_rd_s { + uint64_t data:32; + uint64_t addr:32; + } s; + struct cvmx_pescx_cfg_rd_s cn52xx; + struct cvmx_pescx_cfg_rd_s cn52xxp1; + struct cvmx_pescx_cfg_rd_s cn56xx; + struct cvmx_pescx_cfg_rd_s cn56xxp1; +}; + +union cvmx_pescx_cfg_wr { + uint64_t u64; + struct cvmx_pescx_cfg_wr_s { + uint64_t data:32; + uint64_t addr:32; + } s; + struct cvmx_pescx_cfg_wr_s cn52xx; + struct cvmx_pescx_cfg_wr_s cn52xxp1; + struct cvmx_pescx_cfg_wr_s cn56xx; + struct cvmx_pescx_cfg_wr_s cn56xxp1; +}; + +union cvmx_pescx_cpl_lut_valid { + uint64_t u64; + struct cvmx_pescx_cpl_lut_valid_s { + uint64_t reserved_32_63:32; + uint64_t tag:32; + } s; + struct cvmx_pescx_cpl_lut_valid_s cn52xx; + struct cvmx_pescx_cpl_lut_valid_s cn52xxp1; + struct cvmx_pescx_cpl_lut_valid_s cn56xx; + struct cvmx_pescx_cpl_lut_valid_s cn56xxp1; +}; + +union cvmx_pescx_ctl_status { + uint64_t u64; + struct cvmx_pescx_ctl_status_s { + uint64_t reserved_28_63:36; + uint64_t dnum:5; + uint64_t pbus:8; + uint64_t qlm_cfg:2; + uint64_t lane_swp:1; + uint64_t pm_xtoff:1; + uint64_t pm_xpme:1; + uint64_t ob_p_cmd:1; + uint64_t reserved_7_8:2; + uint64_t nf_ecrc:1; + uint64_t dly_one:1; + uint64_t lnk_enb:1; + uint64_t ro_ctlp:1; + uint64_t reserved_2_2:1; + uint64_t inv_ecrc:1; + uint64_t inv_lcrc:1; + } s; + struct cvmx_pescx_ctl_status_s cn52xx; + struct cvmx_pescx_ctl_status_s cn52xxp1; + struct cvmx_pescx_ctl_status_cn56xx { + uint64_t reserved_28_63:36; + uint64_t dnum:5; + uint64_t pbus:8; + uint64_t qlm_cfg:2; + uint64_t reserved_12_12:1; + uint64_t pm_xtoff:1; + uint64_t pm_xpme:1; + uint64_t ob_p_cmd:1; + uint64_t 
reserved_7_8:2; + uint64_t nf_ecrc:1; + uint64_t dly_one:1; + uint64_t lnk_enb:1; + uint64_t ro_ctlp:1; + uint64_t reserved_2_2:1; + uint64_t inv_ecrc:1; + uint64_t inv_lcrc:1; + } cn56xx; + struct cvmx_pescx_ctl_status_cn56xx cn56xxp1; +}; + +union cvmx_pescx_ctl_status2 { + uint64_t u64; + struct cvmx_pescx_ctl_status2_s { + uint64_t reserved_2_63:62; + uint64_t pclk_run:1; + uint64_t pcierst:1; + } s; + struct cvmx_pescx_ctl_status2_s cn52xx; + struct cvmx_pescx_ctl_status2_cn52xxp1 { + uint64_t reserved_1_63:63; + uint64_t pcierst:1; + } cn52xxp1; + struct cvmx_pescx_ctl_status2_s cn56xx; + struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1; +}; + +union cvmx_pescx_dbg_info { + uint64_t u64; + struct cvmx_pescx_dbg_info_s { + uint64_t reserved_31_63:33; + uint64_t ecrc_e:1; + uint64_t rawwpp:1; + uint64_t racpp:1; + uint64_t ramtlp:1; + uint64_t rarwdns:1; + uint64_t caar:1; + uint64_t racca:1; + uint64_t racur:1; + uint64_t rauc:1; + uint64_t rqo:1; + uint64_t fcuv:1; + uint64_t rpe:1; + uint64_t fcpvwt:1; + uint64_t dpeoosd:1; + uint64_t rtwdle:1; + uint64_t rdwdle:1; + uint64_t mre:1; + uint64_t rte:1; + uint64_t acto:1; + uint64_t rvdm:1; + uint64_t rumep:1; + uint64_t rptamrc:1; + uint64_t rpmerc:1; + uint64_t rfemrc:1; + uint64_t rnfemrc:1; + uint64_t rcemrc:1; + uint64_t rpoison:1; + uint64_t recrce:1; + uint64_t rtlplle:1; + uint64_t rtlpmal:1; + uint64_t spoison:1; + } s; + struct cvmx_pescx_dbg_info_s cn52xx; + struct cvmx_pescx_dbg_info_s cn52xxp1; + struct cvmx_pescx_dbg_info_s cn56xx; + struct cvmx_pescx_dbg_info_s cn56xxp1; +}; + +union cvmx_pescx_dbg_info_en { + uint64_t u64; + struct cvmx_pescx_dbg_info_en_s { + uint64_t reserved_31_63:33; + uint64_t ecrc_e:1; + uint64_t rawwpp:1; + uint64_t racpp:1; + uint64_t ramtlp:1; + uint64_t rarwdns:1; + uint64_t caar:1; + uint64_t racca:1; + uint64_t racur:1; + uint64_t rauc:1; + uint64_t rqo:1; + uint64_t fcuv:1; + uint64_t rpe:1; + uint64_t fcpvwt:1; + uint64_t dpeoosd:1; + uint64_t rtwdle:1; + uint64_t rdwdle:1; + uint64_t mre:1; + uint64_t rte:1; + uint64_t acto:1; + uint64_t rvdm:1; + uint64_t rumep:1; + uint64_t rptamrc:1; + uint64_t rpmerc:1; + uint64_t rfemrc:1; + uint64_t rnfemrc:1; + uint64_t rcemrc:1; + uint64_t rpoison:1; + uint64_t recrce:1; + uint64_t rtlplle:1; + uint64_t rtlpmal:1; + uint64_t spoison:1; + } s; + struct cvmx_pescx_dbg_info_en_s cn52xx; + struct cvmx_pescx_dbg_info_en_s cn52xxp1; + struct cvmx_pescx_dbg_info_en_s cn56xx; + struct cvmx_pescx_dbg_info_en_s cn56xxp1; +}; + +union cvmx_pescx_diag_status { + uint64_t u64; + struct cvmx_pescx_diag_status_s { + uint64_t reserved_4_63:60; + uint64_t pm_dst:1; + uint64_t pm_stat:1; + uint64_t pm_en:1; + uint64_t aux_en:1; + } s; + struct cvmx_pescx_diag_status_s cn52xx; + struct cvmx_pescx_diag_status_s cn52xxp1; + struct cvmx_pescx_diag_status_s cn56xx; + struct cvmx_pescx_diag_status_s cn56xxp1; +}; + +union cvmx_pescx_p2n_bar0_start { + uint64_t u64; + struct cvmx_pescx_p2n_bar0_start_s { + uint64_t addr:50; + uint64_t reserved_0_13:14; + } s; + struct cvmx_pescx_p2n_bar0_start_s cn52xx; + struct cvmx_pescx_p2n_bar0_start_s cn52xxp1; + struct cvmx_pescx_p2n_bar0_start_s cn56xx; + struct cvmx_pescx_p2n_bar0_start_s cn56xxp1; +}; + +union cvmx_pescx_p2n_bar1_start { + uint64_t u64; + struct cvmx_pescx_p2n_bar1_start_s { + uint64_t addr:38; + uint64_t reserved_0_25:26; + } s; + struct cvmx_pescx_p2n_bar1_start_s cn52xx; + struct cvmx_pescx_p2n_bar1_start_s cn52xxp1; + struct cvmx_pescx_p2n_bar1_start_s cn56xx; + struct cvmx_pescx_p2n_bar1_start_s cn56xxp1; +}; 
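/*
 * Illustrative usage sketch, not part of this patch: every CSR union in
 * these headers pairs a raw register view (u64 here, u32 in the PCIe
 * config-space headers) with a bit-field view `s`, so a register is read
 * once into the raw member and then decoded field by field. The helper
 * name and the raw-value parameter below are hypothetical; real code
 * would obtain the value from whatever CSR read accessor the OCTEON
 * support code provides.
 */
static inline uint64_t example_pescx_bar1_addr_field(uint64_t raw_csr_value)
{
	union cvmx_pescx_p2n_bar1_start bar1;

	bar1.u64 = raw_csr_value;	/* load the raw 64-bit CSR word */
	return bar1.s.addr;		/* ADDR field; the reserved_0_25
					 * naming suggests it spans bits
					 * 63:26 of the register */
}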
+ +union cvmx_pescx_p2n_bar2_start { + uint64_t u64; + struct cvmx_pescx_p2n_bar2_start_s { + uint64_t addr:25; + uint64_t reserved_0_38:39; + } s; + struct cvmx_pescx_p2n_bar2_start_s cn52xx; + struct cvmx_pescx_p2n_bar2_start_s cn52xxp1; + struct cvmx_pescx_p2n_bar2_start_s cn56xx; + struct cvmx_pescx_p2n_bar2_start_s cn56xxp1; +}; + +union cvmx_pescx_p2p_barx_end { + uint64_t u64; + struct cvmx_pescx_p2p_barx_end_s { + uint64_t addr:52; + uint64_t reserved_0_11:12; + } s; + struct cvmx_pescx_p2p_barx_end_s cn52xx; + struct cvmx_pescx_p2p_barx_end_s cn52xxp1; + struct cvmx_pescx_p2p_barx_end_s cn56xx; + struct cvmx_pescx_p2p_barx_end_s cn56xxp1; +}; + +union cvmx_pescx_p2p_barx_start { + uint64_t u64; + struct cvmx_pescx_p2p_barx_start_s { + uint64_t addr:52; + uint64_t reserved_0_11:12; + } s; + struct cvmx_pescx_p2p_barx_start_s cn52xx; + struct cvmx_pescx_p2p_barx_start_s cn52xxp1; + struct cvmx_pescx_p2p_barx_start_s cn56xx; + struct cvmx_pescx_p2p_barx_start_s cn56xxp1; +}; + +union cvmx_pescx_tlp_credits { + uint64_t u64; + struct cvmx_pescx_tlp_credits_s { + uint64_t reserved_0_63:64; + } s; + struct cvmx_pescx_tlp_credits_cn52xx { + uint64_t reserved_56_63:8; + uint64_t peai_ppf:8; + uint64_t pesc_cpl:8; + uint64_t pesc_np:8; + uint64_t pesc_p:8; + uint64_t npei_cpl:8; + uint64_t npei_np:8; + uint64_t npei_p:8; + } cn52xx; + struct cvmx_pescx_tlp_credits_cn52xxp1 { + uint64_t reserved_38_63:26; + uint64_t peai_ppf:8; + uint64_t pesc_cpl:5; + uint64_t pesc_np:5; + uint64_t pesc_p:5; + uint64_t npei_cpl:5; + uint64_t npei_np:5; + uint64_t npei_p:5; + } cn52xxp1; + struct cvmx_pescx_tlp_credits_cn52xx cn56xx; + struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx-pexp-defs.h b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h new file mode 100644 index 00000000000..5ea5dc571b5 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-pexp-defs.h @@ -0,0 +1,229 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@caviumnetworks.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2008 Cavium Networks + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +/** + * cvmx-pexp-defs.h + * + * Configuration and status register (CSR) definitions for + * OCTEON PEXP. 
+ * + */ +#ifndef __CVMX_PEXP_DEFS_H__ +#define __CVMX_PEXP_DEFS_H__ + +#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008000ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_BIST_STATUS \ + CVMX_ADD_IO_SEG(0x00011F0000008580ull) +#define CVMX_PEXP_NPEI_BIST_STATUS2 \ + CVMX_ADD_IO_SEG(0x00011F0000008680ull) +#define CVMX_PEXP_NPEI_CTL_PORT0 \ + CVMX_ADD_IO_SEG(0x00011F0000008250ull) +#define CVMX_PEXP_NPEI_CTL_PORT1 \ + CVMX_ADD_IO_SEG(0x00011F0000008260ull) +#define CVMX_PEXP_NPEI_CTL_STATUS \ + CVMX_ADD_IO_SEG(0x00011F0000008570ull) +#define CVMX_PEXP_NPEI_CTL_STATUS2 \ + CVMX_ADD_IO_SEG(0x00011F000000BC00ull) +#define CVMX_PEXP_NPEI_DATA_OUT_CNT \ + CVMX_ADD_IO_SEG(0x00011F00000085F0ull) +#define CVMX_PEXP_NPEI_DBG_DATA \ + CVMX_ADD_IO_SEG(0x00011F0000008510ull) +#define CVMX_PEXP_NPEI_DBG_SELECT \ + CVMX_ADD_IO_SEG(0x00011F0000008500ull) +#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL \ + CVMX_ADD_IO_SEG(0x00011F00000085C0ull) +#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL \ + CVMX_ADD_IO_SEG(0x00011F00000085D0ull) +#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008450ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000083B0ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008400ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F00000084A0ull + (((offset) & 7) * 16)) +#define CVMX_PEXP_NPEI_DMA_CNTS \ + CVMX_ADD_IO_SEG(0x00011F00000085E0ull) +#define CVMX_PEXP_NPEI_DMA_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F00000083A0ull) +#define CVMX_PEXP_NPEI_INT_A_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000008560ull) +#define CVMX_PEXP_NPEI_INT_A_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BCE0ull) +#define CVMX_PEXP_NPEI_INT_A_SUM \ + CVMX_ADD_IO_SEG(0x00011F0000008550ull) +#define CVMX_PEXP_NPEI_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000008540ull) +#define CVMX_PEXP_NPEI_INT_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BCD0ull) +#define CVMX_PEXP_NPEI_INT_INFO \ + CVMX_ADD_IO_SEG(0x00011F0000008590ull) +#define CVMX_PEXP_NPEI_INT_SUM \ + CVMX_ADD_IO_SEG(0x00011F0000008530ull) +#define CVMX_PEXP_NPEI_INT_SUM2 \ + CVMX_ADD_IO_SEG(0x00011F000000BCC0ull) +#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 \ + CVMX_ADD_IO_SEG(0x00011F0000008600ull) +#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 \ + CVMX_ADD_IO_SEG(0x00011F0000008610ull) +#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL \ + CVMX_ADD_IO_SEG(0x00011F00000084F0ull) +#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000008280ull + (((offset) & 31) * 16) - 16 * 12) +#define CVMX_PEXP_NPEI_MSI_ENB0 \ + CVMX_ADD_IO_SEG(0x00011F000000BC50ull) +#define CVMX_PEXP_NPEI_MSI_ENB1 \ + CVMX_ADD_IO_SEG(0x00011F000000BC60ull) +#define CVMX_PEXP_NPEI_MSI_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BC70ull) +#define CVMX_PEXP_NPEI_MSI_ENB3 \ + CVMX_ADD_IO_SEG(0x00011F000000BC80ull) +#define CVMX_PEXP_NPEI_MSI_RCV0 \ + CVMX_ADD_IO_SEG(0x00011F000000BC10ull) +#define CVMX_PEXP_NPEI_MSI_RCV1 \ + CVMX_ADD_IO_SEG(0x00011F000000BC20ull) +#define CVMX_PEXP_NPEI_MSI_RCV2 \ + CVMX_ADD_IO_SEG(0x00011F000000BC30ull) +#define CVMX_PEXP_NPEI_MSI_RCV3 \ + CVMX_ADD_IO_SEG(0x00011F000000BC40ull) +#define CVMX_PEXP_NPEI_MSI_RD_MAP \ + CVMX_ADD_IO_SEG(0x00011F000000BCA0ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 \ + CVMX_ADD_IO_SEG(0x00011F000000BCF0ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 \ + CVMX_ADD_IO_SEG(0x00011F000000BD00ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 \ + 
CVMX_ADD_IO_SEG(0x00011F000000BD10ull) +#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 \ + CVMX_ADD_IO_SEG(0x00011F000000BD20ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 \ + CVMX_ADD_IO_SEG(0x00011F000000BD30ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 \ + CVMX_ADD_IO_SEG(0x00011F000000BD40ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 \ + CVMX_ADD_IO_SEG(0x00011F000000BD50ull) +#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 \ + CVMX_ADD_IO_SEG(0x00011F000000BD60ull) +#define CVMX_PEXP_NPEI_MSI_WR_MAP \ + CVMX_ADD_IO_SEG(0x00011F000000BC90ull) +#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT \ + CVMX_ADD_IO_SEG(0x00011F000000BD70ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV \ + CVMX_ADD_IO_SEG(0x00011F000000BCB0ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 \ + CVMX_ADD_IO_SEG(0x00011F0000008650ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 \ + CVMX_ADD_IO_SEG(0x00011F0000008660ull) +#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 \ + CVMX_ADD_IO_SEG(0x00011F0000008670ull) +#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000A400ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000A800ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000AC00ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000B000ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000B400ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000B800ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000009400ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000009800ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) \ + CVMX_ADD_IO_SEG(0x00011F0000009C00ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKT_CNT_INT \ + CVMX_ADD_IO_SEG(0x00011F0000009110ull) +#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000009130ull) +#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES \ + CVMX_ADD_IO_SEG(0x00011F00000090B0ull) +#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS \ + CVMX_ADD_IO_SEG(0x00011F00000090A0ull) +#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR \ + CVMX_ADD_IO_SEG(0x00011F0000009090ull) +#define CVMX_PEXP_NPEI_PKT_DPADDR \ + CVMX_ADD_IO_SEG(0x00011F0000009080ull) +#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL \ + CVMX_ADD_IO_SEG(0x00011F0000009150ull) +#define CVMX_PEXP_NPEI_PKT_INSTR_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000009000ull) +#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE \ + CVMX_ADD_IO_SEG(0x00011F0000009190ull) +#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE \ + CVMX_ADD_IO_SEG(0x00011F0000009020ull) +#define CVMX_PEXP_NPEI_PKT_INT_LEVELS \ + CVMX_ADD_IO_SEG(0x00011F0000009100ull) +#define CVMX_PEXP_NPEI_PKT_IN_BP \ + CVMX_ADD_IO_SEG(0x00011F00000086B0ull) +#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) \ + CVMX_ADD_IO_SEG(0x00011F000000A000ull + (((offset) & 31) * 16)) +#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS \ + CVMX_ADD_IO_SEG(0x00011F00000086A0ull) +#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT \ + CVMX_ADD_IO_SEG(0x00011F00000091A0ull) +#define CVMX_PEXP_NPEI_PKT_IPTR \ + CVMX_ADD_IO_SEG(0x00011F0000009070ull) +#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK \ + CVMX_ADD_IO_SEG(0x00011F0000009160ull) +#define CVMX_PEXP_NPEI_PKT_OUT_BMODE \ + CVMX_ADD_IO_SEG(0x00011F00000090D0ull) +#define CVMX_PEXP_NPEI_PKT_OUT_ENB \ + 
CVMX_ADD_IO_SEG(0x00011F0000009010ull) +#define CVMX_PEXP_NPEI_PKT_PCIE_PORT \ + CVMX_ADD_IO_SEG(0x00011F00000090E0ull) +#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST \ + CVMX_ADD_IO_SEG(0x00011F0000008690ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_ES \ + CVMX_ADD_IO_SEG(0x00011F0000009050ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE \ + CVMX_ADD_IO_SEG(0x00011F0000009180ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_NS \ + CVMX_ADD_IO_SEG(0x00011F0000009040ull) +#define CVMX_PEXP_NPEI_PKT_SLIST_ROR \ + CVMX_ADD_IO_SEG(0x00011F0000009030ull) +#define CVMX_PEXP_NPEI_PKT_TIME_INT \ + CVMX_ADD_IO_SEG(0x00011F0000009120ull) +#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB \ + CVMX_ADD_IO_SEG(0x00011F0000009140ull) +#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS \ + CVMX_ADD_IO_SEG(0x00011F0000008520ull) +#define CVMX_PEXP_NPEI_SCRATCH_1 \ + CVMX_ADD_IO_SEG(0x00011F0000008270ull) +#define CVMX_PEXP_NPEI_STATE1 \ + CVMX_ADD_IO_SEG(0x00011F0000008620ull) +#define CVMX_PEXP_NPEI_STATE2 \ + CVMX_ADD_IO_SEG(0x00011F0000008630ull) +#define CVMX_PEXP_NPEI_STATE3 \ + CVMX_ADD_IO_SEG(0x00011F0000008640ull) +#define CVMX_PEXP_NPEI_WINDOW_CTL \ + CVMX_ADD_IO_SEG(0x00011F0000008380ull) + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 03fddfa3e92..e31e3fe14f8 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -376,6 +376,18 @@ static inline uint64_t cvmx_get_cycle(void) } /** + * Wait for the specified number of cycle + * + */ +static inline void cvmx_wait(uint64_t cycles) +{ + uint64_t done = cvmx_get_cycle() + cycles; + + while (cvmx_get_cycle() < done) + ; /* Spin */ +} + +/** * Reads a chip global cycle counter. This counts CPU cycles since * chip reset. The counter is 64 bit. * This register does not exist on CN38XX pass 1 silicion diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h index 04fac684069..ef24a7b4ea5 100644 --- a/arch/mips/include/asm/octeon/octeon-feature.h +++ b/arch/mips/include/asm/octeon/octeon-feature.h @@ -57,6 +57,13 @@ enum octeon_feature { OCTEON_FEATURE_RAID, /* Octeon has a builtin USB */ OCTEON_FEATURE_USB, + /* Octeon IPD can run without using work queue entries */ + OCTEON_FEATURE_NO_WPTR, + /* Octeon has DFA state machines */ + OCTEON_FEATURE_DFA, + /* Octeon MDIO block supports clause 45 transactions for 10 + * Gig support */ + OCTEON_FEATURE_MDIO_CLAUSE_45, }; static inline int cvmx_fuse_read(int fuse); @@ -112,6 +119,26 @@ static inline int octeon_has_feature(enum octeon_feature feature) case OCTEON_FEATURE_USB: return !(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)); + case OCTEON_FEATURE_NO_WPTR: + return (OCTEON_IS_MODEL(OCTEON_CN56XX) + || OCTEON_IS_MODEL(OCTEON_CN52XX)) + && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X); + case OCTEON_FEATURE_DFA: + if (!OCTEON_IS_MODEL(OCTEON_CN38XX) + && !OCTEON_IS_MODEL(OCTEON_CN31XX) + && !OCTEON_IS_MODEL(OCTEON_CN58XX)) + return 0; + else if (OCTEON_IS_MODEL(OCTEON_CN3020)) + return 0; + else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)) + return 1; + else + return !cvmx_fuse_read(120); + case OCTEON_FEATURE_MDIO_CLAUSE_45: + return !(OCTEON_IS_MODEL(OCTEON_CN3XXX) + || OCTEON_IS_MODEL(OCTEON_CN58XX) + || OCTEON_IS_MODEL(OCTEON_CN50XX)); } return 0; } diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index edc676084cd..cac9b1a206f 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ 
b/arch/mips/include/asm/octeon/octeon.h @@ -245,4 +245,6 @@ static inline uint32_t octeon_npi_read32(uint64_t address) return cvmx_read64_uint32(address ^ 4); } +extern struct cvmx_bootinfo *octeon_bootinfo; + #endif /* __ASM_OCTEON_OCTEON_H */ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 72c80d2034c..dc0eaa73128 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -32,6 +32,11 @@ #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) +#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) +#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + #ifndef __ASSEMBLY__ #include <linux/pfn.h> diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 51b34a48c84..1073e6df862 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -72,6 +72,7 @@ #else #define _PAGE_R4KBUG (1<<5) /* workaround for r4k bug */ +#define _PAGE_HUGE (1<<5) /* huge tlb page */ #define _PAGE_GLOBAL (1<<6) #define _PAGE_VALID (1<<7) #define _PAGE_SILENT_READ (1<<7) /* synonym */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 6a0edf72ffb..1a9f9b25755 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -292,6 +292,16 @@ static inline pte_t pte_mkyoung(pte_t pte) pte_val(pte) |= _PAGE_SILENT_READ; return pte; } + +#ifdef _PAGE_HUGE +static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_HUGE; + return pte; +} +#endif /* _PAGE_HUGE */ #endif static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkspecial(pte_t pte) { return pte; } diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 4c140db3678..387bf59f1e3 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -399,6 +399,7 @@ __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16) __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32) __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32) +__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64) __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64) __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128) diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h new file mode 100644 index 00000000000..294cdb66c5f --- /dev/null +++ b/arch/mips/include/asm/suspend.h @@ -0,0 +1,9 @@ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +static inline int arch_prepare_suspend(void) { return 0; } + +/* References to section boundaries */ +extern const void __nosave_begin, __nosave_end; + +#endif /* __ASM_SUSPEND_H */ diff --git a/arch/mips/include/asm/txx9/dmac.h b/arch/mips/include/asm/txx9/dmac.h new file mode 100644 index 00000000000..5e9151fccbb --- /dev/null +++ b/arch/mips/include/asm/txx9/dmac.h @@ -0,0 +1,51 @@ +/* + * TXx9 SoC DMA Controller + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software 
Foundation. + */ + +#ifndef __ASM_TXX9_DMAC_H +#define __ASM_TXX9_DMAC_H + +#include <linux/dmaengine.h> + +#define TXX9_DMA_MAX_NR_CHANNELS 4 + +/** + * struct txx9dmac_platform_data - Controller configuration parameters + * @memcpy_chan: Channel used for DMA_MEMCPY + * @have_64bit_regs: DMAC have 64 bit registers + */ +struct txx9dmac_platform_data { + int memcpy_chan; + bool have_64bit_regs; +}; + +/** + * struct txx9dmac_chan_platform_data - Channel configuration parameters + * @dmac_dev: A platform device for DMAC + */ +struct txx9dmac_chan_platform_data { + struct platform_device *dmac_dev; +}; + +/** + * struct txx9dmac_slave - Controller-specific information about a slave + * @tx_reg: physical address of data register used for + * memory-to-peripheral transfers + * @rx_reg: physical address of data register used for + * peripheral-to-memory transfers + * @reg_width: peripheral register width + */ +struct txx9dmac_slave { + u64 tx_reg; + u64 rx_reg; + unsigned int reg_width; +}; + +void txx9_dmac_init(int id, unsigned long baseaddr, int irq, + const struct txx9dmac_platform_data *pdata); + +#endif /* __ASM_TXX9_DMAC_H */ diff --git a/arch/mips/include/asm/txx9/generic.h b/arch/mips/include/asm/txx9/generic.h index 9cde0090cbf..827dc22be2e 100644 --- a/arch/mips/include/asm/txx9/generic.h +++ b/arch/mips/include/asm/txx9/generic.h @@ -91,4 +91,10 @@ void txx9_7segled_init(unsigned int num, void (*putc)(unsigned int pos, unsigned char val)); int txx9_7segled_putc(unsigned int pos, char c); +void __init txx9_aclc_init(unsigned long baseaddr, int irq, + unsigned int dmac_id, + unsigned int dma_chan_out, + unsigned int dma_chan_in); +void __init txx9_sramc_init(struct resource *r); + #endif /* __ASM_TXX9_GENERIC_H */ diff --git a/arch/mips/include/asm/txx9/tx4927.h b/arch/mips/include/asm/txx9/tx4927.h index 7d813f1cb98..18c98c52afd 100644 --- a/arch/mips/include/asm/txx9/tx4927.h +++ b/arch/mips/include/asm/txx9/tx4927.h @@ -41,6 +41,7 @@ #define TX4927_SDRAMC_REG (TX4927_REG_BASE + 0x8000) #define TX4927_EBUSC_REG (TX4927_REG_BASE + 0x9000) +#define TX4927_DMA_REG (TX4927_REG_BASE + 0xb000) #define TX4927_PCIC_REG (TX4927_REG_BASE + 0xd000) #define TX4927_CCFG_REG (TX4927_REG_BASE + 0xe000) #define TX4927_IRC_REG (TX4927_REG_BASE + 0xf600) @@ -49,6 +50,7 @@ #define TX4927_NR_SIO 2 #define TX4927_SIO_REG(ch) (TX4927_REG_BASE + 0xf300 + (ch) * 0x100) #define TX4927_PIO_REG (TX4927_REG_BASE + 0xf500) +#define TX4927_ACLC_REG (TX4927_REG_BASE + 0xf700) #define TX4927_IR_ECCERR 0 #define TX4927_IR_WTOERR 1 @@ -265,5 +267,7 @@ int tx4927_pciclk66_setup(void); void tx4927_setup_pcierr_irq(void); void tx4927_irq_init(void); void tx4927_mtd_init(int ch); +void tx4927_dmac_init(int memcpy_chan); +void tx4927_aclc_init(unsigned int dma_chan_out, unsigned int dma_chan_in); #endif /* __ASM_TXX9_TX4927_H */ diff --git a/arch/mips/include/asm/txx9/tx4938.h b/arch/mips/include/asm/txx9/tx4938.h index cd8bc202175..8a178f186f7 100644 --- a/arch/mips/include/asm/txx9/tx4938.h +++ b/arch/mips/include/asm/txx9/tx4938.h @@ -305,5 +305,8 @@ struct tx4938ide_platform_info { }; void tx4938_ata_init(unsigned int irq, unsigned int shift, int tune); +void tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1); +void tx4938_aclc_init(void); +void tx4938_sramc_init(void); #endif diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h index f02c50b3abf..d4f342cd593 100644 --- a/arch/mips/include/asm/txx9/tx4939.h +++ b/arch/mips/include/asm/txx9/tx4939.h @@ -45,6 +45,8 @@ #define 
TX4939_RTC_REG (TX4939_REG_BASE + 0xfb00) #define TX4939_CIR_REG (TX4939_REG_BASE + 0xfc00) +#define TX4939_RNG_REG (TX4939_CRYPTO_REG + 0xb0) + struct tx4939_le_reg { __u32 r; __u32 unused; @@ -544,5 +546,9 @@ void tx4939_ata_init(void); void tx4939_rtc_init(void); void tx4939_ndfmc_init(unsigned int hold, unsigned int spw, unsigned char ch_mask, unsigned char wide_mask); +void tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1); +void tx4939_aclc_init(void); +void tx4939_sramc_init(void); +void tx4939_rng_init(void); #endif /* __ASM_TXX9_TX4939_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index c901c22d7ad..8d006ec6567 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/kbuild.h> +#include <linux/suspend.h> #include <asm/ptrace.h> #include <asm/processor.h> @@ -326,3 +327,15 @@ void output_octeon_cop2_state_defines(void) BLANK(); } #endif + +#ifdef CONFIG_HIBERNATION +void output_pbe_defines(void) +{ + COMMENT(" Linux struct pbe offsets. "); + OFFSET(PBE_ADDRESS, pbe, address); + OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + DEFINE(PBE_SIZE, sizeof(struct pbe)); + BLANK(); +} +#endif diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 2e911e3da8d..0037f21baf0 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -20,22 +20,29 @@ #define TIMER_CCD 0 /* 1/2 */ #define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD)) -static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr; +struct txx9_clocksource { + struct clocksource cs; + struct txx9_tmr_reg __iomem *tmrptr; +}; static cycle_t txx9_cs_read(struct clocksource *cs) { - return __raw_readl(&txx9_cs_tmrptr->trr); + struct txx9_clocksource *txx9_cs = + container_of(cs, struct txx9_clocksource, cs); + return __raw_readl(&txx9_cs->tmrptr->trr); } /* Use 1 bit smaller width to use full bits in that width */ #define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1) -static struct clocksource txx9_clocksource = { - .name = "TXx9", - .rating = 200, - .read = txx9_cs_read, - .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS), - .flags = CLOCK_SOURCE_IS_CONTINUOUS, +static struct txx9_clocksource txx9_clocksource = { + .cs = { + .name = "TXx9", + .rating = 200, + .read = txx9_cs_read, + .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + }, }; void __init txx9_clocksource_init(unsigned long baseaddr, @@ -43,8 +50,8 @@ void __init txx9_clocksource_init(unsigned long baseaddr, { struct txx9_tmr_reg __iomem *tmrptr; - clocksource_set_clock(&txx9_clocksource, TIMER_CLK(imbusclk)); - clocksource_register(&txx9_clocksource); + clocksource_set_clock(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); + clocksource_register(&txx9_clocksource.cs); tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); __raw_writel(TCR_BASE, &tmrptr->tcr); @@ -53,10 +60,13 @@ void __init txx9_clocksource_init(unsigned long baseaddr, __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr); __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra); __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); - txx9_cs_tmrptr = tmrptr; + txx9_clocksource.tmrptr = tmrptr; } -static struct txx9_tmr_reg __iomem *txx9_tmrptr; +struct txx9_clock_event_device { + struct clock_event_device cd; + struct txx9_tmr_reg __iomem *tmrptr; +}; static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr) { @@ -69,7 +79,9 @@ static void 
txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr) static void txx9tmr_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { - struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr; + struct txx9_clock_event_device *txx9_cd = + container_of(evt, struct txx9_clock_event_device, cd); + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; txx9tmr_stop_and_clear(tmrptr); switch (mode) { @@ -99,7 +111,9 @@ static void txx9tmr_set_mode(enum clock_event_mode mode, static int txx9tmr_set_next_event(unsigned long delta, struct clock_event_device *evt) { - struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr; + struct txx9_clock_event_device *txx9_cd = + container_of(evt, struct txx9_clock_event_device, cd); + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; txx9tmr_stop_and_clear(tmrptr); /* start timer */ @@ -108,18 +122,22 @@ static int txx9tmr_set_next_event(unsigned long delta, return 0; } -static struct clock_event_device txx9tmr_clock_event_device = { - .name = "TXx9", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .rating = 200, - .set_mode = txx9tmr_set_mode, - .set_next_event = txx9tmr_set_next_event, +static struct txx9_clock_event_device txx9_clock_event_device = { + .cd = { + .name = "TXx9", + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + .rating = 200, + .set_mode = txx9tmr_set_mode, + .set_next_event = txx9tmr_set_next_event, + }, }; static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) { - struct clock_event_device *cd = &txx9tmr_clock_event_device; - struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr; + struct txx9_clock_event_device *txx9_cd = dev_id; + struct clock_event_device *cd = &txx9_cd->cd; + struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; __raw_writel(0, &tmrptr->tisr); /* ack interrupt */ cd->event_handler(cd); @@ -130,19 +148,20 @@ static struct irqaction txx9tmr_irq = { .handler = txx9tmr_interrupt, .flags = IRQF_DISABLED | IRQF_PERCPU, .name = "txx9tmr", + .dev_id = &txx9_clock_event_device, }; void __init txx9_clockevent_init(unsigned long baseaddr, int irq, unsigned int imbusclk) { - struct clock_event_device *cd = &txx9tmr_clock_event_device; + struct clock_event_device *cd = &txx9_clock_event_device.cd; struct txx9_tmr_reg __iomem *tmrptr; tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); txx9tmr_stop_and_clear(tmrptr); __raw_writel(TIMER_CCD, &tmrptr->ccdr); __raw_writel(0, &tmrptr->itmr); - txx9_tmrptr = tmrptr; + txx9_clock_event_device.tmrptr = tmrptr; clockevent_set_clock(cd, TIMER_CLK(imbusclk)); cd->max_delta_ns = diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c index 149cd914526..5b457a40c78 100644 --- a/arch/mips/kernel/init_task.c +++ b/arch/mips/kernel/init_task.c @@ -11,10 +11,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. 
* diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 5f5af7d4c89..37d51cd124e 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -924,6 +924,7 @@ void ipi_decode(struct smtc_ipi *pipi) int irq = MIPS_CPU_IRQ_BASE + 1; smtc_ipi_nq(&freeIPIq, pipi); + switch (type_copy) { case SMTC_CLOCK_TICK: irq_enter(); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e83da174b53..08f1edf355e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1502,7 +1502,7 @@ void __cpuinit per_cpu_trap_init(void) status_set); if (cpu_has_mips_r2) { - unsigned int enable = 0x0000000f; + unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits; if (!noulri && cpu_has_userlocal) enable |= (1 << 29); @@ -1510,10 +1510,6 @@ void __cpuinit per_cpu_trap_init(void) write_c0_hwrena(enable); } -#ifdef CONFIG_CPU_CAVIUM_OCTEON - write_c0_hwrena(0xc000000f); /* Octeon has register 30 and 31 */ -#endif - #ifdef CONFIG_MIPS_MT_SMTC if (!secondaryTC) { #endif /* CONFIG_MIPS_MT_SMTC */ diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index f69c6b569eb..6b3b1de9dca 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -43,7 +43,7 @@ void __udelay(unsigned long us) { unsigned int lpj = current_cpu_data.udelay_val; - __delay((us * 0x000010c7 * HZ * lpj) >> 32); + __delay((us * 0x000010c7ull * HZ * lpj) >> 32); } EXPORT_SYMBOL(__udelay); @@ -51,6 +51,6 @@ void __ndelay(unsigned long ns) { unsigned int lpj = current_cpu_data.udelay_val; - __delay((us * 0x00000005 * HZ * lpj) >> 32); + __delay((ns * 0x00000005ull * HZ * lpj) >> 32); } EXPORT_SYMBOL(__ndelay); diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index d7ec9552229..f0e43559970 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -8,6 +8,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \ obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o obj-$(CONFIG_HIGHMEM) += highmem.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_CPU_LOONGSON2) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o tlb-r4k.o diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 171951d2305..71fe4cb778c 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -100,6 +100,12 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr) blast_dcache32_page(addr); } +static inline void r4k_blast_dcache_page_dc64(unsigned long addr) +{ + R4600_HIT_CACHEOP_WAR_IMPL; + blast_dcache64_page(addr); +} + static void __cpuinit r4k_blast_dcache_page_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); @@ -110,6 +116,8 @@ static void __cpuinit r4k_blast_dcache_page_setup(void) r4k_blast_dcache_page = blast_dcache16_page; else if (dc_lsize == 32) r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; + else if (dc_lsize == 64) + r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; } static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); @@ -124,6 +132,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; else if (dc_lsize == 32) r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; + else if (dc_lsize == 64) + r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; } static void (* r4k_blast_dcache)(void); @@ -138,6 +148,8 @@ static void __cpuinit r4k_blast_dcache_setup(void) r4k_blast_dcache = blast_dcache16; else if (dc_lsize == 32) r4k_blast_dcache = blast_dcache32; + else if (dc_lsize == 64) + 
r4k_blast_dcache = blast_dcache64; } /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 4fdb7f5216b..7e48e76148a 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -20,9 +20,10 @@ #include <dma-coherence.h> -static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr) +static inline unsigned long dma_addr_to_virt(struct device *dev, + dma_addr_t dma_addr) { - unsigned long addr = plat_dma_addr_to_phys(dma_addr); + unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr); return (unsigned long)phys_to_virt(addr); } @@ -111,7 +112,7 @@ EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { - plat_unmap_dma_mem(dev, dma_handle); + plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); free_pages((unsigned long) vaddr, get_order(size)); } @@ -122,7 +123,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, { unsigned long addr = (unsigned long) vaddr; - plat_unmap_dma_mem(dev, dma_handle); + plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); if (!plat_device_is_coherent(dev)) addr = CAC_ADDR(addr); @@ -170,10 +171,10 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { if (cpu_is_noncoherent_r10000(dev)) - __dma_sync(dma_addr_to_virt(dma_addr), size, + __dma_sync(dma_addr_to_virt(dev, dma_addr), size, direction); - plat_unmap_dma_mem(dev, dma_addr); + plat_unmap_dma_mem(dev, dma_addr, size, direction); } EXPORT_SYMBOL(dma_unmap_single); @@ -232,7 +233,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, if (addr) __dma_sync(addr, sg->length, direction); } - plat_unmap_dma_mem(dev, sg->dma_address); + plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); } } @@ -246,7 +247,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, if (cpu_is_noncoherent_r10000(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr, size, direction); } } @@ -262,7 +263,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, if (!plat_device_is_coherent(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr, size, direction); } } @@ -277,7 +278,7 @@ void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, if (cpu_is_noncoherent_r10000(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr + offset, size, direction); } } @@ -293,7 +294,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, if (!plat_device_is_coherent(dev)) { unsigned long addr; - addr = dma_addr_to_virt(dma_handle); + addr = dma_addr_to_virt(dev, dma_handle); __dma_sync(addr + offset, size, direction); } } diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c new file mode 100644 index 00000000000..471c09aa161 --- /dev/null +++ b/arch/mips/mm/hugetlbpage.c @@ -0,0 +1,101 @@ +/* + * MIPS Huge TLB Page Support for Kernel. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> + * Copyright 2005, Embedded Alley Solutions, Inc. + * Matt Porter <mporter@embeddedalley.com> + * Copyright (C) 2008, 2009 Cavium Networks, Inc. + */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/smp_lock.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> + +pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (pud) + pte = (pte_t *)pmd_alloc(mm, pud, addr); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + pud = pud_offset(pgd, addr); + if (pud_present(*pud)) + pmd = pmd_offset(pud, addr); + } + return (pte_t *) pmd; +} + +int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) +{ + return 0; +} + +/* + * This function checks for proper alignment of input addr and len parameters. + */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +struct page * +follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) +{ + return ERR_PTR(-EINVAL); +} + +int pmd_huge(pmd_t pmd) +{ + return (pmd_val(pmd) & _PAGE_HUGE) != 0; +} + +int pud_huge(pud_t pud) +{ + return (pud_val(pud) & _PAGE_HUGE) != 0; +} + +struct page * +follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write) +{ + struct page *page; + + page = pte_page(*(pte_t *)pmd); + if (page) + page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); + return page; +} + diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 892be426787..f60fe513eb6 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/hugetlb.h> #include <asm/cpu.h> #include <asm/bootinfo.h> @@ -295,21 +296,41 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) pudp = pud_offset(pgdp, address); pmdp = pmd_offset(pudp, address); idx = read_c0_index(); - ptep = pte_offset_map(pmdp, address); +#ifdef CONFIG_HUGETLB_PAGE + /* this could be a huge page */ + if (pmd_huge(*pmdp)) { + unsigned long lo; + write_c0_pagemask(PM_HUGE_MASK); + ptep = (pte_t *)pmdp; + lo = pte_val(*ptep) >> 6; + write_c0_entrylo0(lo); + write_c0_entrylo1(lo + (HPAGE_SIZE >> 7)); + + mtc0_tlbw_hazard(); + if (idx < 0) + tlb_write_random(); + else + tlb_write_indexed(); + write_c0_pagemask(PM_DEFAULT_MASK); + } else +#endif + { + ptep = pte_offset_map(pmdp, address); #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) - write_c0_entrylo0(ptep->pte_high); - ptep++; - write_c0_entrylo1(ptep->pte_high); + write_c0_entrylo0(ptep->pte_high); + ptep++; + write_c0_entrylo1(ptep->pte_high); #else - write_c0_entrylo0(pte_val(*ptep++) >> 6); - write_c0_entrylo1(pte_val(*ptep) >> 6); + write_c0_entrylo0(pte_val(*ptep++) >> 6); + write_c0_entrylo1(pte_val(*ptep) >> 6); #endif - mtc0_tlbw_hazard(); - if (idx < 0) - tlb_write_random(); - else - tlb_write_indexed(); + mtc0_tlbw_hazard(); + if (idx < 0) + 
tlb_write_random(); + else + tlb_write_indexed(); + } tlbw_use_hazard(); FLUSH_ITLB_VM(vma); EXIT_CRITICAL(flags); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 0615b62efd6..8f606ead826 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -6,8 +6,9 @@ * Synthesize TLB refill handlers at runtime. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer - * Copyright (C) 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2008, 2009 Cavium Networks, Inc. * * ... and the days got worse and worse and now you see * I've gone completly out of my mind. @@ -19,6 +20,7 @@ * (Condolences to Napoleon XIV) */ +#include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> @@ -82,6 +84,9 @@ enum label_id { label_nopage_tlbm, label_smp_pgtable_change, label_r3000_write_probe_fail, +#ifdef CONFIG_HUGETLB_PAGE + label_tlb_huge_update, +#endif }; UASM_L_LA(_second_part) @@ -98,6 +103,9 @@ UASM_L_LA(_nopage_tlbs) UASM_L_LA(_nopage_tlbm) UASM_L_LA(_smp_pgtable_change) UASM_L_LA(_r3000_write_probe_fail) +#ifdef CONFIG_HUGETLB_PAGE +UASM_L_LA(_tlb_huge_update) +#endif /* * For debug purposes. @@ -125,6 +133,7 @@ static inline void dump_handler(const u32 *handler, int count) #define C0_TCBIND 2, 2 #define C0_ENTRYLO1 3, 0 #define C0_CONTEXT 4, 0 +#define C0_PAGEMASK 5, 0 #define C0_BADVADDR 8, 0 #define C0_ENTRYHI 10, 0 #define C0_EPC 14, 0 @@ -258,7 +267,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, } if (cpu_has_mips_r2) { - uasm_i_ehb(p); + if (cpu_has_mips_r2_exec_hazard) + uasm_i_ehb(p); tlbw(p); return; } @@ -310,7 +320,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_BCM3302: case CPU_BCM4710: case CPU_LOONGSON2: - case CPU_CAVIUM_OCTEON: case CPU_R5500: if (m4kc_tlbp_war()) uasm_i_nop(p); @@ -382,6 +391,98 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, } } +#ifdef CONFIG_HUGETLB_PAGE +static __cpuinit void build_huge_tlb_write_entry(u32 **p, + struct uasm_label **l, + struct uasm_reloc **r, + unsigned int tmp, + enum tlb_write_entry wmode) +{ + /* Set huge page tlb entry size */ + uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); + uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff); + uasm_i_mtc0(p, tmp, C0_PAGEMASK); + + build_tlb_write_entry(p, l, r, wmode); + + /* Reset default page size */ + if (PM_DEFAULT_MASK >> 16) { + uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16); + uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff); + uasm_il_b(p, r, label_leave); + uasm_i_mtc0(p, tmp, C0_PAGEMASK); + } else if (PM_DEFAULT_MASK) { + uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK); + uasm_il_b(p, r, label_leave); + uasm_i_mtc0(p, tmp, C0_PAGEMASK); + } else { + uasm_il_b(p, r, label_leave); + uasm_i_mtc0(p, 0, C0_PAGEMASK); + } +} + +/* + * Check if Huge PTE is present, if so then jump to LABEL. + */ +static void __cpuinit +build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, + unsigned int pmd, int lid) +{ + UASM_i_LW(p, tmp, 0, pmd); + uasm_i_andi(p, tmp, tmp, _PAGE_HUGE); + uasm_il_bnez(p, r, tmp, lid); +} + +static __cpuinit void build_huge_update_entries(u32 **p, + unsigned int pte, + unsigned int tmp) +{ + int small_sequence; + + /* + * A huge PTE describes an area the size of the + * configured huge page size. This is twice the + * of the large TLB entry size we intend to use. 
+ * A TLB entry half the size of the configured + * huge page size is configured into entrylo0 + * and entrylo1 to cover the contiguous huge PTE + * address space. + */ + small_sequence = (HPAGE_SIZE >> 7) < 0x10000; + + /* We can clobber tmp. It isn't used after this.*/ + if (!small_sequence) + uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16)); + + UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */ + uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */ + /* convert to entrylo1 */ + if (small_sequence) + UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7); + else + UASM_i_ADDU(p, pte, pte, tmp); + + uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */ +} + +static __cpuinit void build_huge_handler_tail(u32 **p, + struct uasm_reloc **r, + struct uasm_label **l, + unsigned int pte, + unsigned int ptr) +{ +#ifdef CONFIG_SMP + UASM_i_SC(p, pte, 0, ptr); + uasm_il_beqz(p, r, pte, label_tlb_huge_update); + UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */ +#else + UASM_i_SW(p, pte, 0, ptr); +#endif + build_huge_update_entries(p, pte, ptr); + build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed); +} +#endif /* CONFIG_HUGETLB_PAGE */ + #ifdef CONFIG_64BIT /* * TMP and PTR are scratch. @@ -649,6 +750,14 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, #endif } +/* + * For a 64-bit kernel, we are using the 64-bit XTLB refill exception + * because EXL == 0. If we wrap, we can also use the 32 instruction + * slots before the XTLB refill exception handler which belong to the + * unused TLB refill exception. + */ +#define MIPS64_REFILL_INSNS 32 + static void __cpuinit build_r4000_tlb_refill_handler(void) { u32 *p = tlb_handler; @@ -680,12 +789,23 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ #endif +#ifdef CONFIG_HUGETLB_PAGE + build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); +#endif + build_get_ptep(&p, K0, K1); build_update_entries(&p, K0, K1); build_tlb_write_entry(&p, &l, &r, tlb_random); uasm_l_leave(&l, p); uasm_i_eret(&p); /* return from trap */ +#ifdef CONFIG_HUGETLB_PAGE + uasm_l_tlb_huge_update(&l, p); + UASM_i_LW(&p, K0, 0, K1); + build_huge_update_entries(&p, K0, K1); + build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random); +#endif + #ifdef CONFIG_64BIT build_get_pgd_vmalloc64(&p, &l, &r, K0, K1); #endif @@ -702,9 +822,10 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) if ((p - tlb_handler) > 64) panic("TLB refill handler space exceeded"); #else - if (((p - tlb_handler) > 63) - || (((p - tlb_handler) > 61) - && uasm_insn_has_bdelay(relocs, tlb_handler + 29))) + if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) + || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) + && uasm_insn_has_bdelay(relocs, + tlb_handler + MIPS64_REFILL_INSNS - 3))) panic("TLB refill handler space exceeded"); #endif @@ -717,39 +838,74 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) uasm_copy_handler(relocs, labels, tlb_handler, p, f); final_len = p - tlb_handler; #else /* CONFIG_64BIT */ - f = final_handler + 32; - if ((p - tlb_handler) <= 32) { + f = final_handler + MIPS64_REFILL_INSNS; + if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { /* Just copy the handler. 
*/ uasm_copy_handler(relocs, labels, tlb_handler, p, f); final_len = p - tlb_handler; } else { - u32 *split = tlb_handler + 30; +#if defined(CONFIG_HUGETLB_PAGE) + const enum label_id ls = label_tlb_huge_update; +#elif defined(MODULE_START) + const enum label_id ls = label_module_alloc; +#else + const enum label_id ls = label_vmalloc; +#endif + u32 *split; + int ov = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) + ; + BUG_ON(i == ARRAY_SIZE(labels)); + split = labels[i].addr; /* - * Find the split point. + * See if we have overflown one way or the other. */ - if (uasm_insn_has_bdelay(relocs, split - 1)) - split--; - + if (split > tlb_handler + MIPS64_REFILL_INSNS || + split < p - MIPS64_REFILL_INSNS) + ov = 1; + + if (ov) { + /* + * Split two instructions before the end. One + * for the branch and one for the instruction + * in the delay slot. + */ + split = tlb_handler + MIPS64_REFILL_INSNS - 2; + + /* + * If the branch would fall in a delay slot, + * we must back up an additional instruction + * so that it is no longer in a delay slot. + */ + if (uasm_insn_has_bdelay(relocs, split - 1)) + split--; + } /* Copy first part of the handler. */ uasm_copy_handler(relocs, labels, tlb_handler, split, f); f += split - tlb_handler; - /* Insert branch. */ - uasm_l_split(&l, final_handler); - uasm_il_b(&f, &r, label_split); - if (uasm_insn_has_bdelay(relocs, split)) - uasm_i_nop(&f); - else { - uasm_copy_handler(relocs, labels, split, split + 1, f); - uasm_move_labels(labels, f, f + 1, -1); - f++; - split++; + if (ov) { + /* Insert branch. */ + uasm_l_split(&l, final_handler); + uasm_il_b(&f, &r, label_split); + if (uasm_insn_has_bdelay(relocs, split)) + uasm_i_nop(&f); + else { + uasm_copy_handler(relocs, labels, + split, split + 1, f); + uasm_move_labels(labels, f, f + 1, -1); + f++; + split++; + } } /* Copy the rest of the handler. */ uasm_copy_handler(relocs, labels, split, p, final_handler); - final_len = (f - (final_handler + 32)) + (p - split); + final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + + (p - split); } #endif /* CONFIG_64BIT */ @@ -782,7 +938,7 @@ u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; static void __cpuinit -iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr) +iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP # ifdef CONFIG_64BIT_PHYS_ADDR @@ -862,13 +1018,13 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, * with it's original value. */ static void __cpuinit -build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r, +build_pte_present(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ); uasm_il_bnez(p, r, pte, lid); - iPTE_LW(p, l, pte, ptr); + iPTE_LW(p, pte, ptr); } /* Make PTE valid, store result in PTR. */ @@ -886,13 +1042,13 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, * restore PTE with value from PTR when done. 
*/ static void __cpuinit -build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, +build_pte_writable(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE); uasm_il_bnez(p, r, pte, lid); - iPTE_LW(p, l, pte, ptr); + iPTE_LW(p, pte, ptr); } /* Make PTE writable, update software status bits as well, then store @@ -913,12 +1069,12 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, * restore PTE with value from PTR when done. */ static void __cpuinit -build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, +build_pte_modifiable(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, enum label_id lid) { uasm_i_andi(p, pte, pte, _PAGE_WRITE); uasm_il_beqz(p, r, pte, lid); - iPTE_LW(p, l, pte, ptr); + iPTE_LW(p, pte, ptr); } /* @@ -994,7 +1150,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); + build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); uasm_i_nop(&p); /* load delay */ build_make_valid(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1024,7 +1180,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); + build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); @@ -1054,7 +1210,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) memset(relocs, 0, sizeof(relocs)); build_r3000_tlbchange_handler_head(&p, K0, K1); - build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); + build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); uasm_i_nop(&p); /* load delay */ build_make_write(&p, &r, K0, K1); build_r3000_pte_reload_tlbwi(&p, K0, K1); @@ -1087,6 +1243,15 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, build_get_pgde32(p, pte, ptr); /* get pgd in ptr */ #endif +#ifdef CONFIG_HUGETLB_PAGE + /* + * For huge tlb entries, pmd doesn't contain an address but + * instead contains the tlb pte. Check the PAGE_HUGE bit and + * see if we need to jump to huge tlb processing. + */ + build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update); +#endif + UASM_i_MFC0(p, pte, C0_BADVADDR); UASM_i_LW(p, ptr, 0, ptr); UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); @@ -1096,7 +1261,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, #ifdef CONFIG_SMP uasm_l_smp_pgtable_change(l, *p); #endif - iPTE_LW(p, l, pte, ptr); /* get even pte */ + iPTE_LW(p, pte, ptr); /* get even pte */ if (!m4kc_tlbp_war()) build_tlb_probe_entry(p); } @@ -1138,12 +1303,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void) } build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl); + build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_valid(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); +#ifdef CONFIG_HUGETLB_PAGE + /* + * This is the entry point when build_r4000_tlbchange_handler_head + * spots a huge page. 
+ */ + uasm_l_tlb_huge_update(&l, p); + iPTE_LW(&p, K0, K1); + build_pte_present(&p, &r, K0, K1, label_nopage_tlbl); + build_tlb_probe_entry(&p); + uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID)); + build_huge_handler_tail(&p, &r, &l, K0, K1); +#endif + uasm_l_nopage_tlbl(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); @@ -1169,12 +1347,26 @@ static void __cpuinit build_r4000_tlb_store_handler(void) memset(relocs, 0, sizeof(relocs)); build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs); + build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); build_make_write(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); +#ifdef CONFIG_HUGETLB_PAGE + /* + * This is the entry point when + * build_r4000_tlbchange_handler_head spots a huge page. + */ + uasm_l_tlb_huge_update(&l, p); + iPTE_LW(&p, K0, K1); + build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs); + build_tlb_probe_entry(&p); + uasm_i_ori(&p, K0, K0, + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); + build_huge_handler_tail(&p, &r, &l, K0, K1); +#endif + uasm_l_nopage_tlbs(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -1200,13 +1392,27 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) memset(relocs, 0, sizeof(relocs)); build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1); - build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm); + build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); if (m4kc_tlbp_war()) build_tlb_probe_entry(&p); /* Present and writable bits set, set accessed and dirty bits. */ build_make_write(&p, &r, K0, K1); build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1); +#ifdef CONFIG_HUGETLB_PAGE + /* + * This is the entry point when + * build_r4000_tlbchange_handler_head spots a huge page. + */ + uasm_l_tlb_huge_update(&l, p); + iPTE_LW(&p, K0, K1); + build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm); + build_tlb_probe_entry(&p); + uasm_i_ori(&p, K0, K0, + _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); + build_huge_handler_tail(&p, &r, &l, K0, K1); +#endif + uasm_l_nopage_tlbm(&l, p); uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); diff --git a/arch/mips/power/Makefile b/arch/mips/power/Makefile new file mode 100644 index 00000000000..73d56b87cb9 --- /dev/null +++ b/arch/mips/power/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_HIBERNATION) += cpu.o hibernate.o diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c new file mode 100644 index 00000000000..7995df45dc8 --- /dev/null +++ b/arch/mips/power/cpu.c @@ -0,0 +1,43 @@ +/* + * Suspend support specific for mips. + * + * Licensed under the GPLv2 + * + * Copyright (C) 2009 Lemote Inc. 
& Insititute of Computing Technology + * Author: Hu Hongbing <huhb@lemote.com> + * Wu Zhangjin <wuzj@lemote.com> + */ +#include <asm/suspend.h> +#include <asm/fpu.h> +#include <asm/dsp.h> + +static u32 saved_status; +struct pt_regs saved_regs; + +void save_processor_state(void) +{ + saved_status = read_c0_status(); + + if (is_fpu_owner()) + save_fp(current); + if (cpu_has_dsp) + save_dsp(current); +} + +void restore_processor_state(void) +{ + write_c0_status(saved_status); + + if (is_fpu_owner()) + restore_fp(current); + if (cpu_has_dsp) + restore_dsp(current); +} + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S new file mode 100644 index 00000000000..486bd3fd01a --- /dev/null +++ b/arch/mips/power/hibernate.S @@ -0,0 +1,70 @@ +/* + * Hibernation support specific for mips - temporary page tables + * + * Licensed under the GPLv2 + * + * Copyright (C) 2009 Lemote Inc. & Insititute of Computing Technology + * Author: Hu Hongbing <huhb@lemote.com> + * Wu Zhangjin <wuzj@lemote.com> + */ +#include <asm/asm-offsets.h> +#include <asm/regdef.h> +#include <asm/asm.h> + +.text +LEAF(swsusp_arch_suspend) + PTR_LA t0, saved_regs + PTR_S ra, PT_R31(t0) + PTR_S sp, PT_R29(t0) + PTR_S fp, PT_R30(t0) + PTR_S gp, PT_R28(t0) + PTR_S s0, PT_R16(t0) + PTR_S s1, PT_R17(t0) + PTR_S s2, PT_R18(t0) + PTR_S s3, PT_R19(t0) + PTR_S s4, PT_R20(t0) + PTR_S s5, PT_R21(t0) + PTR_S s6, PT_R22(t0) + PTR_S s7, PT_R23(t0) + j swsusp_save +END(swsusp_arch_suspend) + +LEAF(swsusp_arch_resume) + PTR_L t0, restore_pblist +0: + PTR_L t1, PBE_ADDRESS(t0) /* source */ + PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ + PTR_ADDIU t3, t1, _PAGE_SIZE +1: + REG_L t8, (t1) + REG_S t8, (t2) + PTR_ADDIU t1, t1, SZREG + PTR_ADDIU t2, t2, SZREG + bne t1, t3, 1b + PTR_L t0, PBE_NEXT(t0) + bnez t0, 0b + /* flush caches to make sure context is in memory */ + PTR_L t0, __flush_cache_all + jalr t0 + /* flush tlb entries */ +#ifdef CONFIG_SMP + jal flush_tlb_all +#else + jal local_flush_tlb_all +#endif + PTR_LA t0, saved_regs + PTR_L ra, PT_R31(t0) + PTR_L sp, PT_R29(t0) + PTR_L fp, PT_R30(t0) + PTR_L gp, PT_R28(t0) + PTR_L s0, PT_R16(t0) + PTR_L s1, PT_R17(t0) + PTR_L s2, PT_R18(t0) + PTR_L s3, PT_R19(t0) + PTR_L s4, PT_R20(t0) + PTR_L s5, PT_R21(t0) + PTR_L s6, PT_R22(t0) + PTR_L s7, PT_R23(t0) + PTR_LI v0, 0x0 + jr ra +END(swsusp_arch_resume) diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c index 53eeb5e7bc5..f07882029a9 100644 --- a/arch/mips/rb532/irq.c +++ b/arch/mips/rb532/irq.c @@ -151,7 +151,8 @@ static void rb532_disable_irq(unsigned int irq_nr) mask |= intr_bit; WRITE_MASK(addr, mask); - if (group == GPIO_MAPPED_IRQ_GROUP) + /* There is a maximum of 14 GPIO interrupts */ + if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13)) rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE); /* @@ -174,7 +175,7 @@ static int rb532_set_type(unsigned int irq_nr, unsigned type) int gpio = irq_nr - GPIO_MAPPED_IRQ_BASE; int group = irq_to_group(irq_nr); - if (group != GPIO_MAPPED_IRQ_GROUP) + if (group != GPIO_MAPPED_IRQ_GROUP || irq_nr > (GROUP4_IRQ_BASE + 13)) return (type == IRQ_TYPE_LEVEL_HIGH) ? 
0 : -EINVAL; switch (type) { diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig index 366b19d33f7..3e639bda43f 100644 --- a/arch/mips/sibyte/Kconfig +++ b/arch/mips/sibyte/Kconfig @@ -75,6 +75,8 @@ config SIBYTE_SB1xxx_SOC select SWAP_IO_SPACE select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL + select CFE + select SYS_HAS_EARLY_PRINTK choice prompt "SiByte SOC Stepping" @@ -128,13 +130,6 @@ config SIBYTE_ENABLE_LDT_IF_PCI bool select SIBYTE_HAS_LDT if PCI -config SIMULATION - bool "Running under simulation" - depends on SIBYTE_SB1xxx_SOC - help - Build a kernel suitable for running under the GDB simulator. - Primarily adjusts the kernel's notion of time. - config SB1_CEX_ALWAYS_FATAL bool "All cache exceptions considered fatal (no recovery attempted)" depends on SIBYTE_SB1xxx_SOC @@ -143,34 +138,14 @@ config SB1_CERR_STALL bool "Stall (rather than panic) on fatal cache error" depends on SIBYTE_SB1xxx_SOC -config SIBYTE_CFE - bool "Booting from CFE" - depends on SIBYTE_SB1xxx_SOC - select CFE - select SYS_HAS_EARLY_PRINTK - help - Make use of the CFE API for enumerating available memory, - controlling secondary CPUs, and possibly console output. - config SIBYTE_CFE_CONSOLE bool "Use firmware console" - depends on SIBYTE_CFE + depends on SIBYTE_SB1xxx_SOC help Use the CFE API's console write routines during boot. Other console options (VT console, sb1250 duart console, etc.) should not be configured. -config SIBYTE_STANDALONE - bool - depends on SIBYTE_SB1xxx_SOC && !SIBYTE_CFE - select SYS_HAS_EARLY_PRINTK - default y - -config SIBYTE_STANDALONE_RAM_SIZE - int "Memory size (in megabytes)" - depends on SIBYTE_STANDALONE - default "32" - config SIBYTE_BUS_WATCHER bool "Support for Bus Watcher statistics" depends on SIBYTE_SB1xxx_SOC diff --git a/arch/mips/sibyte/cfe/Makefile b/arch/mips/sibyte/cfe/Makefile deleted file mode 100644 index 02b32e142ad..00000000000 --- a/arch/mips/sibyte/cfe/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -lib-y = setup.o -lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile index 48a91b9e587..4f659837c7c 100644 --- a/arch/mips/sibyte/common/Makefile +++ b/arch/mips/sibyte/common/Makefile @@ -1,5 +1,5 @@ -obj-y := - +obj-y := cfe.o +obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/common/cfe.c index eb5396cf81b..eb5396cf81b 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/common/cfe.c diff --git a/arch/mips/sibyte/cfe/console.c b/arch/mips/sibyte/common/cfe_console.c index 81e3d54376e..81e3d54376e 100644 --- a/arch/mips/sibyte/cfe/console.c +++ b/arch/mips/sibyte/common/cfe_console.c diff --git a/arch/mips/sibyte/sb1250/Makefile b/arch/mips/sibyte/sb1250/Makefile index 697793783a2..1896f4e77a3 100644 --- a/arch/mips/sibyte/sb1250/Makefile +++ b/arch/mips/sibyte/sb1250/Makefile @@ -1,7 +1,6 @@ obj-y := setup.o irq.o time.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SIBYTE_STANDALONE) += prom.o obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index 409dec79886..5e7f2016cce 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c @@ -111,11 +111,6 @@ static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) i = cpumask_first(mask); - if (cpumask_weight(mask) > 1) { - printk("attempted to set 
irq affinity for irq %d to multiple CPUs\n", irq); - return -1; - } - /* Convert logical CPU to physical CPU */ cpu = cpu_logical_map(i); diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c deleted file mode 100644 index 65b1af66b67..00000000000 --- a/arch/mips/sibyte/sb1250/prom.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (C) 2000, 2001 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/blkdev.h> -#include <linux/bootmem.h> -#include <linux/smp.h> -#include <linux/initrd.h> -#include <linux/pm.h> - -#include <asm/bootinfo.h> -#include <asm/reboot.h> - -#define MAX_RAM_SIZE ((CONFIG_SIBYTE_STANDALONE_RAM_SIZE * 1024 * 1024) - 1) - -static __init void prom_meminit(void) -{ -#ifdef CONFIG_BLK_DEV_INITRD - unsigned long initrd_pstart; - unsigned long initrd_pend; - - initrd_pstart = __pa(initrd_start); - initrd_pend = __pa(initrd_end); - if (initrd_start && - ((initrd_pstart > MAX_RAM_SIZE) - || (initrd_pend > MAX_RAM_SIZE))) { - panic("initrd out of addressable memory"); - } - - add_memory_region(0, initrd_pstart, - BOOT_MEM_RAM); - add_memory_region(initrd_pstart, initrd_pend - initrd_pstart, - BOOT_MEM_RESERVED); - add_memory_region(initrd_pend, - (CONFIG_SIBYTE_STANDALONE_RAM_SIZE * 1024 * 1024) - initrd_pend, - BOOT_MEM_RAM); -#else - add_memory_region(0, CONFIG_SIBYTE_STANDALONE_RAM_SIZE * 1024 * 1024, - BOOT_MEM_RAM); -#endif -} - -void prom_cpu0_exit(void *unused) -{ - while (1) ; -} - -static void prom_linux_exit(void) -{ -#ifdef CONFIG_SMP - if (smp_processor_id()) { - smp_call_function(prom_cpu0_exit, NULL, 1); - } -#endif - while(1); -} - -/* - * prom_init is called just after the cpu type is determined, from setup_arch() - */ -void __init prom_init(void) -{ - _machine_restart = (void (*)(char *))prom_linux_exit; - _machine_halt = prom_linux_exit; - pm_power_off = prom_linux_exit; - - strcpy(arcs_cmdline, "root=/dev/ram0 "); - - prom_meminit(); -} - -void __init prom_free_prom_memory(void) -{ - /* Not sure what I'm supposed to do here. 
Nothing, I think */ -} - -void prom_putchar(char c) -{ -} diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 080c966263b..672e45d495a 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -136,20 +136,6 @@ void __init plat_mem_setup(void) if (m41t81_probe()) swarm_rtc_type = RTC_M4LT81; - printk("This kernel optimized for " -#ifdef CONFIG_SIMULATION - "simulation" -#else - "board" -#endif - " runs " -#ifdef CONFIG_SIBYTE_CFE - "with" -#else - "without" -#endif - " CFE\n"); - #ifdef CONFIG_VT screen_info = (struct screen_info) { 0, 0, /* orig-x, orig-y */ diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c index 7396cd71990..6827feb4de9 100644 --- a/arch/mips/sni/eisa.c +++ b/arch/mips/sni/eisa.c @@ -38,7 +38,7 @@ int __init sni_eisa_root_init(void) if (!r) return r; - eisa_root_dev.dev.driver_data = &eisa_bus_root; + dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root); if (eisa_root_register(&eisa_bus_root)) { /* A real bridge may have been registered before diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig index 0db7cf38ed8..852ae4bb7a8 100644 --- a/arch/mips/txx9/Kconfig +++ b/arch/mips/txx9/Kconfig @@ -69,6 +69,7 @@ config SOC_TX4927 select IRQ_TXX9 select PCI_TX4927 select GPIO_TXX9 + select HAS_TXX9_ACLC config SOC_TX4938 bool @@ -78,6 +79,7 @@ config SOC_TX4938 select IRQ_TXX9 select PCI_TX4927 select GPIO_TXX9 + select HAS_TXX9_ACLC config SOC_TX4939 bool @@ -85,6 +87,7 @@ config SOC_TX4939 select HAS_TXX9_SERIAL select HW_HAS_PCI select PCI_TX4927 + select HAS_TXX9_ACLC config TXX9_7SEGLED bool diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c index 8a266c6a3f5..3b7d77d61ce 100644 --- a/arch/mips/txx9/generic/setup.c +++ b/arch/mips/txx9/generic/setup.c @@ -24,6 +24,7 @@ #include <linux/serial_core.h> #include <linux/mtd/physmap.h> #include <linux/leds.h> +#include <linux/sysdev.h> #include <asm/bootinfo.h> #include <asm/time.h> #include <asm/reboot.h> @@ -33,6 +34,7 @@ #include <asm/txx9/pci.h> #include <asm/txx9tmr.h> #include <asm/txx9/ndfmc.h> +#include <asm/txx9/dmac.h> #ifdef CONFIG_CPU_TX49XX #include <asm/txx9/tx4938.h> #endif @@ -821,3 +823,176 @@ void __init txx9_iocled_init(unsigned long baseaddr, { } #endif /* CONFIG_LEDS_GPIO */ + +void __init txx9_dmac_init(int id, unsigned long baseaddr, int irq, + const struct txx9dmac_platform_data *pdata) +{ +#if defined(CONFIG_TXX9_DMAC) || defined(CONFIG_TXX9_DMAC_MODULE) + struct resource res[] = { + { + .start = baseaddr, + .end = baseaddr + 0x800 - 1, + .flags = IORESOURCE_MEM, +#ifndef CONFIG_MACH_TX49XX + }, { + .start = irq, + .flags = IORESOURCE_IRQ, +#endif + } + }; +#ifdef CONFIG_MACH_TX49XX + struct resource chan_res[] = { + { + .flags = IORESOURCE_IRQ, + } + }; +#endif + struct platform_device *pdev = platform_device_alloc("txx9dmac", id); + struct txx9dmac_chan_platform_data cpdata; + int i; + + if (!pdev || + platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) || + platform_device_add_data(pdev, pdata, sizeof(*pdata)) || + platform_device_add(pdev)) { + platform_device_put(pdev); + return; + } + memset(&cpdata, 0, sizeof(cpdata)); + cpdata.dmac_dev = pdev; + for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { +#ifdef CONFIG_MACH_TX49XX + chan_res[0].start = irq + i; +#endif + pdev = platform_device_alloc("txx9dmac-chan", + id * TXX9_DMA_MAX_NR_CHANNELS + i); + if (!pdev || +#ifdef CONFIG_MACH_TX49XX + platform_device_add_resources(pdev, chan_res, + ARRAY_SIZE(chan_res)) || +#endif + 
platform_device_add_data(pdev, &cpdata, sizeof(cpdata)) || + platform_device_add(pdev)) + platform_device_put(pdev); + } +#endif +} + +void __init txx9_aclc_init(unsigned long baseaddr, int irq, + unsigned int dmac_id, + unsigned int dma_chan_out, + unsigned int dma_chan_in) +{ +#if defined(CONFIG_SND_SOC_TXX9ACLC) || \ + defined(CONFIG_SND_SOC_TXX9ACLC_MODULE) + unsigned int dma_base = dmac_id * TXX9_DMA_MAX_NR_CHANNELS; + struct resource res[] = { + { + .start = baseaddr, + .end = baseaddr + 0x100 - 1, + .flags = IORESOURCE_MEM, + }, { + .start = irq, + .flags = IORESOURCE_IRQ, + }, { + .name = "txx9dmac-chan", + .start = dma_base + dma_chan_out, + .flags = IORESOURCE_DMA, + }, { + .name = "txx9dmac-chan", + .start = dma_base + dma_chan_in, + .flags = IORESOURCE_DMA, + } + }; + struct platform_device *pdev = + platform_device_alloc("txx9aclc-ac97", -1); + + if (!pdev || + platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) || + platform_device_add(pdev)) + platform_device_put(pdev); +#endif +} + +static struct sysdev_class txx9_sramc_sysdev_class; + +struct txx9_sramc_sysdev { + struct sys_device dev; + struct bin_attribute bindata_attr; + void __iomem *base; +}; + +static ssize_t txx9_sram_read(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t size) +{ + struct txx9_sramc_sysdev *dev = bin_attr->private; + size_t ramsize = bin_attr->size; + + if (pos >= ramsize) + return 0; + if (pos + size > ramsize) + size = ramsize - pos; + memcpy_fromio(buf, dev->base + pos, size); + return size; +} + +static ssize_t txx9_sram_write(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t pos, size_t size) +{ + struct txx9_sramc_sysdev *dev = bin_attr->private; + size_t ramsize = bin_attr->size; + + if (pos >= ramsize) + return 0; + if (pos + size > ramsize) + size = ramsize - pos; + memcpy_toio(dev->base + pos, buf, size); + return size; +} + +void __init txx9_sramc_init(struct resource *r) +{ + struct txx9_sramc_sysdev *dev; + size_t size; + int err; + + if (!txx9_sramc_sysdev_class.name) { + txx9_sramc_sysdev_class.name = "txx9_sram"; + err = sysdev_class_register(&txx9_sramc_sysdev_class); + if (err) { + txx9_sramc_sysdev_class.name = NULL; + return; + } + } + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return; + size = resource_size(r); + dev->base = ioremap(r->start, size); + if (!dev->base) + goto exit; + dev->dev.cls = &txx9_sramc_sysdev_class; + dev->bindata_attr.attr.name = "bindata"; + dev->bindata_attr.attr.mode = S_IRUSR | S_IWUSR; + dev->bindata_attr.read = txx9_sram_read; + dev->bindata_attr.write = txx9_sram_write; + dev->bindata_attr.size = size; + dev->bindata_attr.private = dev; + err = sysdev_register(&dev->dev); + if (err) + goto exit; + err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr); + if (err) { + sysdev_unregister(&dev->dev); + goto exit; + } + return; +exit: + if (dev) { + if (dev->base) + iounmap(dev->base); + kfree(dev); + } +} diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c index 1093549df1a..3418b2a90f7 100644 --- a/arch/mips/txx9/generic/setup_tx4927.c +++ b/arch/mips/txx9/generic/setup_tx4927.c @@ -22,6 +22,7 @@ #include <asm/txx9tmr.h> #include <asm/txx9pio.h> #include <asm/txx9/generic.h> +#include <asm/txx9/dmac.h> #include <asm/txx9/tx4927.h> static void __init tx4927_wdr_init(void) @@ -253,6 +254,60 @@ void __init tx4927_mtd_init(int ch) txx9_physmap_flash_init(ch, start, size, &pdata); } +void __init tx4927_dmac_init(int 
memcpy_chan) +{ + struct txx9dmac_platform_data plat_data = { + .memcpy_chan = memcpy_chan, + .have_64bit_regs = true, + }; + + txx9_dmac_init(0, TX4927_DMA_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4927_IR_DMA(0), &plat_data); +} + +void __init tx4927_aclc_init(unsigned int dma_chan_out, + unsigned int dma_chan_in) +{ + u64 pcfg = __raw_readq(&tx4927_ccfgptr->pcfg); + __u64 dmasel_mask = 0, dmasel = 0; + unsigned long flags; + + if (!(pcfg & TX4927_PCFG_SEL2)) + return; + /* setup DMASEL (playback:ACLC ch0, capture:ACLC ch1) */ + switch (dma_chan_out) { + case 0: + dmasel_mask |= TX4927_PCFG_DMASEL0_MASK; + dmasel |= TX4927_PCFG_DMASEL0_ACL0; + break; + case 2: + dmasel_mask |= TX4927_PCFG_DMASEL2_MASK; + dmasel |= TX4927_PCFG_DMASEL2_ACL0; + break; + default: + return; + } + switch (dma_chan_in) { + case 1: + dmasel_mask |= TX4927_PCFG_DMASEL1_MASK; + dmasel |= TX4927_PCFG_DMASEL1_ACL1; + break; + case 3: + dmasel_mask |= TX4927_PCFG_DMASEL3_MASK; + dmasel |= TX4927_PCFG_DMASEL3_ACL1; + break; + default: + return; + } + local_irq_save(flags); + txx9_clear64(&tx4927_ccfgptr->pcfg, dmasel_mask); + txx9_set64(&tx4927_ccfgptr->pcfg, dmasel); + local_irq_restore(flags); + txx9_aclc_init(TX4927_ACLC_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4927_IR_ACLC, + 0, dma_chan_out, dma_chan_in); +} + static void __init tx4927_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c index 3925219b897..eb208011023 100644 --- a/arch/mips/txx9/generic/setup_tx4938.c +++ b/arch/mips/txx9/generic/setup_tx4938.c @@ -24,6 +24,7 @@ #include <asm/txx9pio.h> #include <asm/txx9/generic.h> #include <asm/txx9/ndfmc.h> +#include <asm/txx9/dmac.h> #include <asm/txx9/tx4938.h> static void __init tx4938_wdr_init(void) @@ -239,11 +240,6 @@ void __init tx4938_setup(void) for (i = 0; i < TX4938_NR_TMR; i++) txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL); - /* DMA */ - for (i = 0; i < 2; i++) - ____raw_writeq(TX4938_DMA_MCR_MSTEN, - (void __iomem *)(TX4938_DMA_REG(i) + 0x50)); - /* PIO */ txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO); __raw_writel(0, &tx4938_pioptr->maskcpu); @@ -403,6 +399,38 @@ void __init tx4938_ndfmc_init(unsigned int hold, unsigned int spw) txx9_ndfmc_init(baseaddr, &plat_data); } +void __init tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1) +{ + struct txx9dmac_platform_data plat_data = { + .have_64bit_regs = true, + }; + int i; + + for (i = 0; i < 2; i++) { + plat_data.memcpy_chan = i ? 
memcpy_chan1 : memcpy_chan0; + txx9_dmac_init(i, TX4938_DMA_REG(i) & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4938_IR_DMA(i, 0), + &plat_data); + } +} + +void __init tx4938_aclc_init(void) +{ + u64 pcfg = __raw_readq(&tx4938_ccfgptr->pcfg); + + if ((pcfg & TX4938_PCFG_SEL2) && + !(pcfg & TX4938_PCFG_ETH0_SEL)) + txx9_aclc_init(TX4938_ACLC_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4938_IR_ACLC, + 1, 0, 1); +} + +void __init tx4938_sramc_init(void) +{ + if (tx4938_sram_resource.start) + txx9_sramc_init(&tx4938_sram_resource); +} + static void __init tx4938_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index c2bf150c883..3dc19f48295 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -28,6 +28,7 @@ #include <asm/txx9tmr.h> #include <asm/txx9/generic.h> #include <asm/txx9/ndfmc.h> +#include <asm/txx9/dmac.h> #include <asm/txx9/tx4939.h> static void __init tx4939_wdr_init(void) @@ -259,11 +260,6 @@ void __init tx4939_setup(void) for (i = 0; i < TX4939_NR_TMR; i++) txx9_tmr_init(TX4939_TMR_REG(i) & 0xfffffffffULL); - /* DMA */ - for (i = 0; i < 2; i++) - ____raw_writeq(TX4938_DMA_MCR_MSTEN, - (void __iomem *)(TX4939_DMA_REG(i) + 0x50)); - /* set PCIC1 reset (required to prevent hangup on BIST) */ txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST); pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg); @@ -474,6 +470,53 @@ void __init tx4939_ndfmc_init(unsigned int hold, unsigned int spw, txx9_ndfmc_init(TX4939_NDFMC_REG & 0xfffffffffULL, &plat_data); } +void __init tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1) +{ + struct txx9dmac_platform_data plat_data = { + .have_64bit_regs = true, + }; + int i; + + for (i = 0; i < 2; i++) { + plat_data.memcpy_chan = i ? 
memcpy_chan1 : memcpy_chan0; + txx9_dmac_init(i, TX4939_DMA_REG(i) & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4939_IR_DMA(i, 0), + &plat_data); + } +} + +void __init tx4939_aclc_init(void) +{ + u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg); + + if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_ACLC) + txx9_aclc_init(TX4939_ACLC_REG & 0xfffffffffULL, + TXX9_IRQ_BASE + TX4939_IR_ACLC, 1, 0, 1); +} + +void __init tx4939_sramc_init(void) +{ + if (tx4939_sram_resource.start) + txx9_sramc_init(&tx4939_sram_resource); +} + +void __init tx4939_rng_init(void) +{ + static struct resource res = { + .start = TX4939_RNG_REG & 0xfffffffffULL, + .end = (TX4939_RNG_REG & 0xfffffffffULL) + 0x30 - 1, + .flags = IORESOURCE_MEM, + }; + static struct platform_device pdev = { + .name = "tx4939-rng", + .id = -1, + .num_resources = 1, + .resource = &res, + }; + + platform_device_register(&pdev); +} + static void __init tx4939_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c index 01129a9d50f..ee468eaee4f 100644 --- a/arch/mips/txx9/rbtx4927/setup.c +++ b/arch/mips/txx9/rbtx4927/setup.c @@ -337,6 +337,14 @@ static void __init rbtx4927_device_init(void) rbtx4927_ne_init(); tx4927_wdt_init(); rbtx4927_mtd_init(); + if (TX4927_REV_PCODE() == 0x4927) { + tx4927_dmac_init(2); + tx4927_aclc_init(0, 1); + } else { + tx4938_dmac_init(0, 2); + tx4938_aclc_init(); + } + platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); txx9_iocled_init(RBTX4927_LED_ADDR - IO_BASE, -1, 3, 1, "green", NULL); rbtx4927_gpioled_init(); } diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c index 65d13df8878..d66509b1428 100644 --- a/arch/mips/txx9/rbtx4938/setup.c +++ b/arch/mips/txx9/rbtx4938/setup.c @@ -355,6 +355,10 @@ static void __init rbtx4938_device_init(void) /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */ tx4938_ndfmc_init(10, 35); tx4938_ata_init(RBTX4938_IRQ_IOC_ATA, 0, 1); + tx4938_dmac_init(0, 2); + tx4938_aclc_init(); + platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); + tx4938_sramc_init(); txx9_iocled_init(RBTX4938_LED_ADDR - IO_BASE, -1, 8, 1, "green", NULL); } diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 4199c6fd4d1..c033ffe71cd 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -498,6 +498,11 @@ static void __init rbtx4939_device_init(void) tx4939_wdt_init(); tx4939_ata_init(); tx4939_rtc_init(); + tx4939_dmac_init(0, 2); + tx4939_aclc_init(); + platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); + tx4939_sramc_init(); + tx4939_rng_init(); } static void __init rbtx4939_setup(void) diff --git a/arch/mn10300/include/asm/kmap_types.h b/arch/mn10300/include/asm/kmap_types.h index 3398f9f3560..76d093b58d4 100644 --- a/arch/mn10300/include/asm/kmap_types.h +++ b/arch/mn10300/include/asm/kmap_types.h @@ -1,31 +1,6 @@ -/* MN10300 kmap_atomic() slot IDs - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif /* _ASM_KMAP_TYPES_H */ diff --git a/arch/mn10300/kernel/init_task.c b/arch/mn10300/kernel/init_task.c index 5ac3566f8c9..80d423b80af 100644 --- a/arch/mn10300/kernel/init_task.c +++ b/arch/mn10300/kernel/init_task.c @@ -20,9 +20,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. * diff --git a/arch/parisc/include/asm/errno.h b/arch/parisc/include/asm/errno.h index e2f3ddc796b..9992abdd782 100644 --- a/arch/parisc/include/asm/errno.h +++ b/arch/parisc/include/asm/errno.h @@ -120,5 +120,6 @@ #define EOWNERDEAD 254 /* Owner died */ #define ENOTRECOVERABLE 255 /* State not recoverable */ +#define ERFKILL 256 /* Operation not possible due to RF-kill */ #endif diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h index 806aae3c533..58e91ed0388 100644 --- a/arch/parisc/include/asm/kmap_types.h +++ b/arch/parisc/include/asm/kmap_types.h @@ -1,30 +1,12 @@ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H - #ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c index 1e25a45d64c..82974b20fc1 100644 --- a/arch/parisc/kernel/init_task.c +++ b/arch/parisc/kernel/init_task.c @@ -36,10 +36,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. 
* diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 7d044dfd923..12dc7c40961 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1808,7 +1808,7 @@ CONFIG_PCF8575=m CONFIG_SENSORS_PCA9539=m CONFIG_SENSORS_PCF8591=m # CONFIG_TPS65010 is not set -CONFIG_SENSORS_MAX6875=m +CONFIG_EEPROM_MAX6875=m CONFIG_SENSORS_TSL2550=m CONFIG_MCU_MPC8349EMITX=m # CONFIG_I2C_DEBUG_CORE is not set diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h index b70d6e53b30..a71c9c1455a 100644 --- a/arch/powerpc/include/asm/8253pit.h +++ b/arch/powerpc/include/asm/8253pit.h @@ -1,10 +1,3 @@ -#ifndef _ASM_POWERPC_8253PIT_H -#define _ASM_POWERPC_8253PIT_H - /* * 8253/8254 Programmable Interval Timer */ - -#define PIT_TICK_RATE 1193182UL - -#endif /* _ASM_POWERPC_8253PIT_H */ diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index e0faf332c9c..157c5ca581c 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -675,6 +675,8 @@ struct ucc_slow_pram { #define UCC_GETH_UPSMR_RMM 0x00001000 #define UCC_GETH_UPSMR_CAM 0x00000400 #define UCC_GETH_UPSMR_BRO 0x00000200 +#define UCC_GETH_UPSMR_SMM 0x00000080 +#define UCC_GETH_UPSMR_SGMM 0x00000020 /* UCC Transmit On Demand Register (UTODR) */ #define UCC_SLOW_TOD 0x8000 diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c index 688b329800b..ffc4253fef5 100644 --- a/arch/powerpc/kernel/init_task.c +++ b/arch/powerpc/kernel/init_task.c @@ -9,10 +9,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. 
* diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 2f0e64b5364..ef6f64950e9 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -44,10 +44,7 @@ #include <asm/sections.h> #include <asm/machdep.h> -#ifdef CONFIG_LOGO_LINUX_CLUT224 #include <linux/linux_logo.h> -extern const struct linux_logo logo_linux_clut224; -#endif /* * Properties whose value is longer than this get excluded from our diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 0eb6d7f6224..51fcae41f08 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c @@ -14,6 +14,7 @@ #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/mdio-bitbang.h> +#include <linux/of_mdio.h> #include <linux/of_platform.h> #include <asm/io.h> @@ -115,7 +116,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev, struct mii_bus *bus; struct resource res; struct device_node *node; - int ret, i; + int ret; node = of_get_parent(ofdev->node); of_node_put(node); @@ -130,17 +131,13 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev, if (!bus) return -ENOMEM; - bus->phy_mask = 0; bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - for (i = 0; i < PHY_MAX_ADDR; i++) - bus->irq[i] = -1; - bus->name = "ep8248e-mdio-bitbang"; bus->parent = &ofdev->dev; snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); - return mdiobus_register(bus); + return of_mdiobus_register(bus, ofdev->node); } static int ep8248e_mdio_remove(struct of_device *ofdev) diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 296b5268754..5e0a191764f 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -122,8 +122,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) area->nid = nid; area->order = order; - area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE, - area->order); + area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, + area->order); if (!area->pages) { printk(KERN_WARNING "%s: no page on node %d\n", diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 9abd210d87c..8547e86bfb4 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -752,17 +752,8 @@ static int __init init_spu_base(void) goto out_unregister_sysdev_class; } - if (ret > 0) { - /* - * We cannot put the forward declaration in - * <linux/linux_logo.h> because of conflicting session type - * conflicts for const and __initdata with different compiler - * versions - */ - extern const struct linux_logo logo_spe_clut224; - + if (ret > 0) fb_append_extra_logo(&logo_spe_clut224, ret); - } mutex_lock(&spu_full_list_mutex); xmon_register_spus(&spu_full_list); diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 75cc165d5be..3bf546797cb 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -29,7 +29,7 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/phy.h> -#include <linux/platform_device.h> +#include <linux/of_mdio.h> #include <linux/of_platform.h> #define DELAY 1 @@ -39,6 +39,7 @@ static void __iomem *gpio_regs; struct gpio_priv { int mdc_pin; int mdio_pin; + int mdio_irqs[PHY_MAX_ADDR]; }; #define MDC_PIN(bus) (((struct gpio_priv *)bus->priv)->mdc_pin) @@ -218,12 +219,11 @@ static int 
__devinit gpio_mdio_probe(struct of_device *ofdev, const struct of_device_id *match) { struct device *dev = &ofdev->dev; - struct device_node *phy_dn, *np = ofdev->node; + struct device_node *np = ofdev->node; struct mii_bus *new_bus; struct gpio_priv *priv; const unsigned int *prop; int err; - int i; err = -ENOMEM; priv = kzalloc(sizeof(struct gpio_priv), GFP_KERNEL); @@ -244,27 +244,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop); new_bus->priv = priv; - new_bus->phy_mask = 0; - - new_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); - - if (!new_bus->irq) - goto out_free_bus; - - for (i = 0; i < PHY_MAX_ADDR; i++) - new_bus->irq[i] = NO_IRQ; - - for (phy_dn = of_get_next_child(np, NULL); - phy_dn != NULL; - phy_dn = of_get_next_child(np, phy_dn)) { - const unsigned int *ip, *regp; - - ip = of_get_property(phy_dn, "interrupts", NULL); - regp = of_get_property(phy_dn, "reg", NULL); - if (!ip || !regp || *regp >= PHY_MAX_ADDR) - continue; - new_bus->irq[*regp] = irq_create_mapping(NULL, *ip); - } + new_bus->irq = priv->mdio_irqs; prop = of_get_property(np, "mdc-pin", NULL); priv->mdc_pin = *prop; @@ -275,7 +255,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, new_bus->parent = dev; dev_set_drvdata(dev, new_bus); - err = mdiobus_register(new_bus); + err = of_mdiobus_register(new_bus, np); if (err != 0) { printk(KERN_ERR "%s: Cannot register as MDIO bus, err %d\n", @@ -286,8 +266,6 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev, return 0; out_free_irq: - kfree(new_bus->irq); -out_free_bus: kfree(new_bus); out_free_priv: kfree(priv); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 99dc3ded6b4..a14dba0e4d6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -348,6 +348,9 @@ config ARCH_ENABLE_MEMORY_HOTPLUG config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y +config ARCH_HIBERNATION_POSSIBLE + def_bool y if 64BIT + source "mm/Kconfig" comment "I/O subsystem configuration" @@ -592,6 +595,12 @@ config SECCOMP endmenu +menu "Power Management" + +source "kernel/power/Kconfig" + +endmenu + source "net/Kconfig" config PCMCIA diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 578c61f15a4..0ff387cebf8 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -88,7 +88,9 @@ LDFLAGS_vmlinux := -e start head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ - arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ + arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ + arch/s390/power/ + libs-y += arch/s390/lib/ drivers-y += drivers/s390/ drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 1dfc7100c7e..264528e4f58 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -5,7 +5,7 @@ * Exports appldata_register_ops() and appldata_unregister_ops() for the * data gathering modules. * - * Copyright IBM Corp. 2003, 2008 + * Copyright IBM Corp. 2003, 2009 * * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> */ @@ -26,6 +26,8 @@ #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/workqueue.h> +#include <linux/suspend.h> +#include <linux/platform_device.h> #include <asm/appldata.h> #include <asm/timer.h> #include <asm/uaccess.h> @@ -41,6 +43,9 @@ #define TOD_MICRO 0x01000 /* nr. 
of TOD clock units for 1 microsecond */ + +static struct platform_device *appldata_pdev; + /* * /proc entries (sysctl) */ @@ -86,6 +91,7 @@ static atomic_t appldata_expire_count = ATOMIC_INIT(0); static DEFINE_SPINLOCK(appldata_timer_lock); static int appldata_interval = APPLDATA_CPU_INTERVAL; static int appldata_timer_active; +static int appldata_timer_suspended = 0; /* * Work queue @@ -475,6 +481,93 @@ void appldata_unregister_ops(struct appldata_ops *ops) /********************** module-ops management <END> **************************/ +/**************************** suspend / resume *******************************/ +static int appldata_freeze(struct device *dev) +{ + struct appldata_ops *ops; + int rc; + struct list_head *lh; + + get_online_cpus(); + spin_lock(&appldata_timer_lock); + if (appldata_timer_active) { + __appldata_vtimer_setup(APPLDATA_DEL_TIMER); + appldata_timer_suspended = 1; + } + spin_unlock(&appldata_timer_lock); + put_online_cpus(); + + mutex_lock(&appldata_ops_mutex); + list_for_each(lh, &appldata_ops_list) { + ops = list_entry(lh, struct appldata_ops, list); + if (ops->active == 1) { + rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, + (unsigned long) ops->data, ops->size, + ops->mod_lvl); + if (rc != 0) + pr_err("Stopping the data collection for %s " + "failed with rc=%d\n", ops->name, rc); + } + } + mutex_unlock(&appldata_ops_mutex); + return 0; +} + +static int appldata_restore(struct device *dev) +{ + struct appldata_ops *ops; + int rc; + struct list_head *lh; + + get_online_cpus(); + spin_lock(&appldata_timer_lock); + if (appldata_timer_suspended) { + __appldata_vtimer_setup(APPLDATA_ADD_TIMER); + appldata_timer_suspended = 0; + } + spin_unlock(&appldata_timer_lock); + put_online_cpus(); + + mutex_lock(&appldata_ops_mutex); + list_for_each(lh, &appldata_ops_list) { + ops = list_entry(lh, struct appldata_ops, list); + if (ops->active == 1) { + ops->callback(ops->data); // init record + rc = appldata_diag(ops->record_nr, + APPLDATA_START_INTERVAL_REC, + (unsigned long) ops->data, ops->size, + ops->mod_lvl); + if (rc != 0) { + pr_err("Starting the data collection for %s " + "failed with rc=%d\n", ops->name, rc); + } + } + } + mutex_unlock(&appldata_ops_mutex); + return 0; +} + +static int appldata_thaw(struct device *dev) +{ + return appldata_restore(dev); +} + +static struct dev_pm_ops appldata_pm_ops = { + .freeze = appldata_freeze, + .thaw = appldata_thaw, + .restore = appldata_restore, +}; + +static struct platform_driver appldata_pdrv = { + .driver = { + .name = "appldata", + .owner = THIS_MODULE, + .pm = &appldata_pm_ops, + }, +}; +/************************* suspend / resume <END> ****************************/ + + /******************************* init / exit *********************************/ static void __cpuinit appldata_online_cpu(int cpu) @@ -531,11 +624,23 @@ static struct notifier_block __cpuinitdata appldata_nb = { */ static int __init appldata_init(void) { - int i; + int i, rc; + + rc = platform_driver_register(&appldata_pdrv); + if (rc) + return rc; + appldata_pdev = platform_device_register_simple("appldata", -1, NULL, + 0); + if (IS_ERR(appldata_pdev)) { + rc = PTR_ERR(appldata_pdev); + goto out_driver; + } appldata_wq = create_singlethread_workqueue("appldata"); - if (!appldata_wq) - return -ENOMEM; + if (!appldata_wq) { + rc = -ENOMEM; + goto out_device; + } get_online_cpus(); for_each_online_cpu(i) @@ -547,6 +652,12 @@ static int __init appldata_init(void) appldata_sysctl_header = register_sysctl_table(appldata_dir_table); return 0; + 
+out_device: + platform_device_unregister(appldata_pdev); +out_driver: + platform_driver_unregister(&appldata_pdrv); + return rc; } __initcall(appldata_init); diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index ba007d8df94..2a541955117 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -1,11 +1,9 @@ /* - * include/asm-s390/ccwdev.h - * include/asm-s390x/ccwdev.h + * Copyright IBM Corp. 2002, 2009 * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Arnd Bergmann <arndb@de.ibm.com> + * Author(s): Arnd Bergmann <arndb@de.ibm.com> * - * Interface for CCW device drivers + * Interface for CCW device drivers */ #ifndef _S390_CCWDEV_H_ #define _S390_CCWDEV_H_ @@ -104,6 +102,11 @@ struct ccw_device { * @set_offline: called when setting device offline * @notify: notify driver of device state changes * @shutdown: called at device shutdown + * @prepare: prepare for pm state transition + * @complete: undo work done in @prepare + * @freeze: callback for freezing during hibernation snapshotting + * @thaw: undo work done in @freeze + * @restore: callback for restoring after hibernation * @driver: embedded device driver structure * @name: device driver name */ @@ -116,6 +119,11 @@ struct ccw_driver { int (*set_offline) (struct ccw_device *); int (*notify) (struct ccw_device *, int); void (*shutdown) (struct ccw_device *); + int (*prepare) (struct ccw_device *); + void (*complete) (struct ccw_device *); + int (*freeze)(struct ccw_device *); + int (*thaw) (struct ccw_device *); + int (*restore)(struct ccw_device *); struct device_driver driver; char *name; }; @@ -184,6 +192,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) extern struct ccw_device *ccw_device_probe_console(void); +extern int ccw_device_force_console(void); // FIXME: these have to go extern int _ccw_device_get_subchannel_number(struct ccw_device *); diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h index a27f68985a7..c79c1e787b8 100644 --- a/arch/s390/include/asm/ccwgroup.h +++ b/arch/s390/include/asm/ccwgroup.h @@ -38,6 +38,11 @@ struct ccwgroup_device { * @set_online: function called when device is set online * @set_offline: function called when device is set offline * @shutdown: function called when device is shut down + * @prepare: prepare for pm state transition + * @complete: undo work done in @prepare + * @freeze: callback for freezing during hibernation snapshotting + * @thaw: undo work done in @freeze + * @restore: callback for restoring after hibernation * @driver: embedded driver structure */ struct ccwgroup_driver { @@ -51,6 +56,11 @@ struct ccwgroup_driver { int (*set_online) (struct ccwgroup_device *); int (*set_offline) (struct ccwgroup_device *); void (*shutdown)(struct ccwgroup_device *); + int (*prepare) (struct ccwgroup_device *); + void (*complete) (struct ccwgroup_device *); + int (*freeze)(struct ccwgroup_device *); + int (*thaw) (struct ccwgroup_device *); + int (*restore)(struct ccwgroup_device *); struct device_driver driver; }; diff --git a/arch/s390/include/asm/kmap_types.h b/arch/s390/include/asm/kmap_types.h index fd157464822..94ec3ee0798 100644 --- a/arch/s390/include/asm/kmap_types.h +++ b/arch/s390/include/asm/kmap_types.h @@ -2,22 +2,7 @@ #ifndef _ASM_KMAP_TYPES_H #define _ASM_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - 
KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif #endif /* __KERNEL__ */ diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h new file mode 100644 index 00000000000..dc75c616eaf --- /dev/null +++ b/arch/s390/include/asm/suspend.h @@ -0,0 +1,10 @@ +#ifndef __ASM_S390_SUSPEND_H +#define __ASM_S390_SUSPEND_H + +static inline int arch_prepare_suspend(void) +{ + return 0; +} + +#endif + diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 3a8b26eb1f2..4fb83c1cdb7 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -1,11 +1,7 @@ /* - * include/asm-s390/system.h + * Copyright IBM Corp. 1999, 2009 * - * S390 version - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * - * Derived from "include/asm-i386/system.h" + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ #ifndef __ASM_SYSTEM_H @@ -469,6 +465,20 @@ extern psw_t sysc_restore_trace_psw; extern psw_t io_restore_trace_psw; #endif +static inline int tprot(unsigned long addr) +{ + int rc = -EFAULT; + + asm volatile( + " tprot 0(%1),0\n" + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b,1b) + : "+d" (rc) : "a" (addr) : "cc"); + return rc; +} + #endif /* __KERNEL__ */ #endif diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index fb263736826..f9b144049dc 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -1,7 +1,7 @@ /* * arch/s390/kernel/early.c * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007, 2009 * Author(s): Hongjie Yang <hongjie@us.ibm.com>, * Heiko Carstens <heiko.carstens@de.ibm.com> */ @@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void) machine_flags |= MACHINE_FLAG_VM; } -static __init void early_pgm_check_handler(void) +static void early_pgm_check_handler(void) { unsigned long addr; const struct exception_table_entry *fixup; @@ -222,7 +222,7 @@ static __init void early_pgm_check_handler(void) S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; } -static noinline __init void setup_lowcore_early(void) +void setup_lowcore_early(void) { psw_t psw; diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c index 7db95c0b869..fe787f9e5f3 100644 --- a/arch/s390/kernel/init_task.c +++ b/arch/s390/kernel/init_task.c @@ -18,10 +18,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure. * diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 9872999c66d..559af0d0787 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c @@ -1,6 +1,7 @@ /* - * Copyright IBM Corp. 2008 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> + * Copyright IBM Corp. 
2008, 2009 + * + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ #include <linux/kernel.h> @@ -9,20 +10,6 @@ #include <asm/sclp.h> #include <asm/setup.h> -static inline int tprot(unsigned long addr) -{ - int rc = -EFAULT; - - asm volatile( - " tprot 0(%1),0\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b,1b) - : "+d" (rc) : "a" (addr) : "cc"); - return rc; -} - #define ADDR2G (1ULL << 31) static void find_memory_chunks(struct mem_chunk chunk[]) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index cc8c484984e..fd8e3111a4e 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,7 +1,7 @@ /* * arch/s390/kernel/smp.c * - * Copyright IBM Corp. 1999,2007 + * Copyright IBM Corp. 1999, 2009 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), * Martin Schwidefsky (schwidefsky@de.ibm.com) * Heiko Carstens (heiko.carstens@de.ibm.com) @@ -1031,6 +1031,42 @@ out: static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, dispatching_store); +/* + * If the resume kernel runs on another cpu than the suspended kernel, + * we have to switch the cpu IDs in the logical map. + */ +void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id, + struct _lowcore *suspend_lowcore) +{ + int cpu, suspend_cpu_id, resume_cpu_id; + u32 suspend_phys_cpu_id; + + suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr]; + suspend_cpu_id = suspend_lowcore->cpu_nr; + + for_each_present_cpu(cpu) { + if (__cpu_logical_map[cpu] == resume_phys_cpu_id) { + resume_cpu_id = cpu; + goto found; + } + } + panic("Could not find resume cpu in logical map.\n"); + +found: + printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id); + printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id); + + __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id; + __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id; + + lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id; +} + +u32 smp_get_phys_cpu_id(void) +{ + return __cpu_logical_map[smp_processor_id()]; +} + static int __init topology_init(void) { int cpu; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 4ca8e826bf3..56566720798 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -313,3 +313,22 @@ int s390_enable_sie(void) return 0; } EXPORT_SYMBOL_GPL(s390_enable_sie); + +#ifdef CONFIG_DEBUG_PAGEALLOC +#ifdef CONFIG_HIBERNATION +bool kernel_page_present(struct page *page) +{ + unsigned long addr; + int cc; + + addr = page_to_phys(page); + asm("lra %1,0(%1)\n" + "ipm %0\n" + "srl %0,28" + :"=d"(cc),"+a"(addr)::"cc"); + return cc == 0; +} + +#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_DEBUG_PAGEALLOC */ + diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile new file mode 100644 index 00000000000..973bb45a8fe --- /dev/null +++ b/arch/s390/power/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for s390 PM support +# + +obj-$(CONFIG_HIBERNATION) += suspend.o +obj-$(CONFIG_HIBERNATION) += swsusp.o +obj-$(CONFIG_HIBERNATION) += swsusp_64.o +obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c new file mode 100644 index 00000000000..b3351eceebb --- /dev/null +++ b/arch/s390/power/suspend.c @@ -0,0 +1,40 @@ +/* + * Suspend support specific for s390. + * + * Copyright IBM Corp. 
2009 + * + * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> + */ + +#include <linux/mm.h> +#include <linux/suspend.h> +#include <linux/reboot.h> +#include <linux/pfn.h> +#include <asm/sections.h> +#include <asm/ipl.h> + +/* + * References to section boundaries + */ +extern const void __nosave_begin, __nosave_end; + +/* + * check if the given pfn is in the 'nosave' or in the read-only NSS section + */ +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; + unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) + >> PAGE_SHIFT; + unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; + unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); + + if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) + return 1; + if (pfn >= stext_pfn && pfn <= eshared_pfn) { + if (ipl_info.type == IPL_TYPE_NSS) + return 1; + } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) + return 1; + return 0; +} diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c new file mode 100644 index 00000000000..e6a4fe9f5f2 --- /dev/null +++ b/arch/s390/power/swsusp.c @@ -0,0 +1,30 @@ +/* + * Support for suspend and resume on s390 + * + * Copyright IBM Corp. 2009 + * + * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> + * + */ + + +/* + * save CPU registers before creating a hibernation image and before + * restoring the memory state from it + */ +void save_processor_state(void) +{ + /* implementation contained in the + * swsusp_arch_suspend function + */ +} + +/* + * restore the contents of CPU registers + */ +void restore_processor_state(void) +{ + /* implementation contained in the + * swsusp_arch_resume function + */ +} diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c new file mode 100644 index 00000000000..9516a517d72 --- /dev/null +++ b/arch/s390/power/swsusp_64.c @@ -0,0 +1,17 @@ +/* + * Support for suspend and resume on s390 + * + * Copyright IBM Corp. 2009 + * + * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> + * + */ + +#include <asm/system.h> +#include <linux/interrupt.h> + +void do_after_copyback(void) +{ + mb(); +} + diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S new file mode 100644 index 00000000000..3c74e7d827c --- /dev/null +++ b/arch/s390/power/swsusp_asm64.S @@ -0,0 +1,199 @@ +/* + * S390 64-bit swsusp implementation + * + * Copyright IBM Corp. 2009 + * + * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> + * Michael Holzheu <holzheu@linux.vnet.ibm.com> + */ + +#include <asm/page.h> +#include <asm/ptrace.h> +#include <asm/asm-offsets.h> + +/* + * Save register context in absolute 0 lowcore and call swsusp_save() to + * create in-memory kernel image. The context is saved in the designated + * "store status" memory locations (see POP). + * We return from this function twice. The first time during the suspend to + * disk process. The second time via the swsusp_arch_resume() function + * (see below) in the resume process. + * This function runs with disabled interrupts.
+ */ + .section .text + .align 2 + .globl swsusp_arch_suspend +swsusp_arch_suspend: + stmg %r6,%r15,__SF_GPRS(%r15) + lgr %r1,%r15 + aghi %r15,-STACK_FRAME_OVERHEAD + stg %r1,__SF_BACKCHAIN(%r15) + + /* Deactivate DAT */ + stnsm __SF_EMPTY(%r15),0xfb + + /* Switch off lowcore protection */ + stctg %c0,%c0,__SF_EMPTY(%r15) + ni __SF_EMPTY+4(%r15),0xef + lctlg %c0,%c0,__SF_EMPTY(%r15) + + /* Store prefix register on stack */ + stpx __SF_EMPTY(%r15) + + /* Setup base register for lowcore (absolute 0) */ + llgf %r1,__SF_EMPTY(%r15) + + /* Get pointer to save area */ + aghi %r1,0x1000 + + /* Store registers */ + mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ + stfpc 0x31c(%r1) /* store fpu control */ + std 0,0x200(%r1) /* store f0 */ + std 1,0x208(%r1) /* store f1 */ + std 2,0x210(%r1) /* store f2 */ + std 3,0x218(%r1) /* store f3 */ + std 4,0x220(%r1) /* store f4 */ + std 5,0x228(%r1) /* store f5 */ + std 6,0x230(%r1) /* store f6 */ + std 7,0x238(%r1) /* store f7 */ + std 8,0x240(%r1) /* store f8 */ + std 9,0x248(%r1) /* store f9 */ + std 10,0x250(%r1) /* store f10 */ + std 11,0x258(%r1) /* store f11 */ + std 12,0x260(%r1) /* store f12 */ + std 13,0x268(%r1) /* store f13 */ + std 14,0x270(%r1) /* store f14 */ + std 15,0x278(%r1) /* store f15 */ + stam %a0,%a15,0x340(%r1) /* store access registers */ + stctg %c0,%c15,0x380(%r1) /* store control registers */ + stmg %r0,%r15,0x280(%r1) /* store general registers */ + + stpt 0x328(%r1) /* store timer */ + stckc 0x330(%r1) /* store clock comparator */ + + /* Activate DAT */ + stosm __SF_EMPTY(%r15),0x04 + + /* Set prefix page to zero */ + xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) + spx __SF_EMPTY(%r15) + + /* Setup lowcore */ + brasl %r14,setup_lowcore_early + + /* Save image */ + brasl %r14,swsusp_save + + /* Switch on lowcore protection */ + stctg %c0,%c0,__SF_EMPTY(%r15) + oi __SF_EMPTY+4(%r15),0x10 + lctlg %c0,%c0,__SF_EMPTY(%r15) + + /* Restore prefix register and return */ + lghi %r1,0x1000 + spx 0x318(%r1) + lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) + lghi %r2,0 + br %r14 + +/* + * Restore saved memory image to correct place and restore register context. + * Then we return to the function that called swsusp_arch_suspend(). + * swsusp_arch_resume() runs with disabled interrupts. 
+ */ + .globl swsusp_arch_resume +swsusp_arch_resume: + stmg %r6,%r15,__SF_GPRS(%r15) + lgr %r1,%r15 + aghi %r15,-STACK_FRAME_OVERHEAD + stg %r1,__SF_BACKCHAIN(%r15) + + /* Save boot cpu number */ + brasl %r14,smp_get_phys_cpu_id + lgr %r10,%r2 + + /* Deactivate DAT */ + stnsm __SF_EMPTY(%r15),0xfb + + /* Switch off lowcore protection */ + stctg %c0,%c0,__SF_EMPTY(%r15) + ni __SF_EMPTY+4(%r15),0xef + lctlg %c0,%c0,__SF_EMPTY(%r15) + + /* Set prefix page to zero */ + xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) + spx __SF_EMPTY(%r15) + + /* Restore saved image */ + larl %r1,restore_pblist + lg %r1,0(%r1) + ltgr %r1,%r1 + jz 2f +0: + lg %r2,8(%r1) + lg %r4,0(%r1) + lghi %r3,PAGE_SIZE + lghi %r5,PAGE_SIZE +1: + mvcle %r2,%r4,0 + jo 1b + lg %r1,16(%r1) + ltgr %r1,%r1 + jnz 0b +2: + ptlb /* flush tlb */ + + /* Restore registers */ + lghi %r13,0x1000 /* %r13 = pointer to save area */ + + spt 0x328(%r13) /* reprogram timer */ + //sckc 0x330(%r13) /* set clock comparator */ + + lctlg %c0,%c15,0x380(%r13) /* load control registers */ + lam %a0,%a15,0x340(%r13) /* load access registers */ + + lfpc 0x31c(%r13) /* load fpu control */ + ld 0,0x200(%r13) /* load f0 */ + ld 1,0x208(%r13) /* load f1 */ + ld 2,0x210(%r13) /* load f2 */ + ld 3,0x218(%r13) /* load f3 */ + ld 4,0x220(%r13) /* load f4 */ + ld 5,0x228(%r13) /* load f5 */ + ld 6,0x230(%r13) /* load f6 */ + ld 7,0x238(%r13) /* load f7 */ + ld 8,0x240(%r13) /* load f8 */ + ld 9,0x248(%r13) /* load f9 */ + ld 10,0x250(%r13) /* load f10 */ + ld 11,0x258(%r13) /* load f11 */ + ld 12,0x260(%r13) /* load f12 */ + ld 13,0x268(%r13) /* load f13 */ + ld 14,0x270(%r13) /* load f14 */ + ld 15,0x278(%r13) /* load f15 */ + + /* Load old stack */ + lg %r15,0x2f8(%r13) + + /* Pointer to save area */ + lghi %r13,0x1000 + + /* Switch CPUs */ + lgr %r2,%r10 /* get cpu id */ + llgf %r3,0x318(%r13) + brasl %r14,smp_switch_boot_cpu_in_resume + + /* Restore prefix register */ + spx 0x318(%r13) + + /* Switch on lowcore protection */ + stctg %c0,%c0,__SF_EMPTY(%r15) + oi __SF_EMPTY+4(%r15),0x10 + lctlg %c0,%c0,__SF_EMPTY(%r15) + + /* Activate DAT */ + stosm __SF_EMPTY(%r15),0x04 + + /* Return 0 */ + lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) + lghi %r2,0 + br %r14 diff --git a/arch/sh/include/asm/kmap_types.h b/arch/sh/include/asm/kmap_types.h index 84d565c696b..5962b08b6dd 100644 --- a/arch/sh/include/asm/kmap_types.h +++ b/arch/sh/include/asm/kmap_types.h @@ -3,30 +3,12 @@ /* Dummy header just to define km_type. */ - #ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c index 80c35ff71d5..1719957c0a6 100644 --- a/arch/sh/kernel/init_task.c +++ b/arch/sh/kernel/init_task.c @@ -10,9 +10,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct pt_regs fake_swapper_regs; -struct mm_struct init_mm = INIT_MM(init_mm); -EXPORT_SYMBOL(init_mm); - /* * Initial thread structure.
* diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index cc12cd48bbc..3f8b6a92eab 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -37,6 +37,8 @@ config SPARC64 select HAVE_KPROBES select HAVE_LMB select HAVE_SYSCALL_WRAPPERS + select HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD select USE_GENERIC_SMP_HELPERS if SMP select RTC_DRV_CMOS select RTC_DRV_BQ4802 @@ -93,6 +95,9 @@ config AUDIT_ARCH config HAVE_SETUP_PER_CPU_AREA def_bool y if SPARC64 +config HAVE_DYNAMIC_PER_CPU_AREA + def_bool y if SPARC64 + config GENERIC_HARDIRQS_NO__DO_IRQ bool def_bool y if SPARC64 diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index b5d63bd8716..0123a4c596c 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.30-rc2 -# Fri Apr 17 02:03:07 2009 +# Linux kernel version: 2.6.30 +# Tue Jun 16 04:59:36 2009 # CONFIG_64BIT=y CONFIG_SPARC=y @@ -19,6 +19,7 @@ CONFIG_LOCKDEP_SUPPORT=y CONFIG_HAVE_LATENCYTOP_SUPPORT=y CONFIG_AUDIT_ARCH=y CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_MMU=y CONFIG_ARCH_NO_VIRT_TO_BUS=y @@ -82,7 +83,6 @@ CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_ALL is not set # CONFIG_KALLSYMS_EXTRA_PASS is not set -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y @@ -95,16 +95,21 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y + +# +# Performance Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_PCI_QUIRKS=y CONFIG_SLUB_DEBUG=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_COMPAT_BRK is not set # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y -# CONFIG_MARKERS is not set +CONFIG_MARKERS=y CONFIG_OPROFILE=m CONFIG_HAVE_OPROFILE=y CONFIG_KPROBES=y @@ -202,6 +207,7 @@ CONFIG_NR_QUICK=1 CONFIG_UNEVICTABLE_LRU=y CONFIG_HAVE_MLOCK=y CONFIG_HAVE_MLOCKED_PAGE_BIT=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=8192 CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y # CONFIG_PREEMPT_NONE is not set @@ -321,6 +327,7 @@ CONFIG_VLAN_8021Q=m # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set # CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set # CONFIG_DCB is not set @@ -340,7 +347,11 @@ CONFIG_WIRELESS=y CONFIG_WIRELESS_OLD_REGULATORY=y # CONFIG_WIRELESS_EXT is not set # CONFIG_LIB80211 is not set -# CONFIG_MAC80211 is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# +CONFIG_MAC80211_DEFAULT_PS_VALUE=0 # CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -364,6 +375,7 @@ CONFIG_EXTRA_FIRMWARE="" CONFIG_CONNECTOR=m # CONFIG_MTD is not set CONFIG_OF_DEVICE=y +CONFIG_OF_MDIO=m # CONFIG_PARPORT is not set CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_FD is not set @@ -399,6 +411,7 @@ CONFIG_MISC_DEVICES=y # CONFIG_EEPROM_AT24 is not set # CONFIG_EEPROM_LEGACY is not set # CONFIG_EEPROM_93CX6 is not set +# CONFIG_CB710_CORE is not set CONFIG_HAVE_IDE=y CONFIG_IDE=y @@ -477,10 +490,6 @@ CONFIG_BLK_DEV_SR=m CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_CHR_DEV_SG=m # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_CONSTANTS=y # CONFIG_SCSI_LOGGING is not set @@ -499,6 +508,7 @@ CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set # CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set # CONFIG_BLK_DEV_3W_XXXX_RAID is not set # CONFIG_SCSI_3W_9XXX is not set # CONFIG_SCSI_ACARD is not set @@ -507,6 +517,7 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_AIC7XXX_OLD is not set # CONFIG_SCSI_AIC79XX is not set # CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set # CONFIG_SCSI_ARCMSR is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set @@ -521,7 +532,6 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_IPS is not set # CONFIG_SCSI_INITIO is not set # CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_MVSAS is not set # CONFIG_SCSI_STEX is not set # CONFIG_SCSI_SYM53C8XX_2 is not set # CONFIG_SCSI_QLOGIC_1280 is not set @@ -569,7 +579,6 @@ CONFIG_DM_ZERO=m # CONFIG_IEEE1394 is not set # CONFIG_I2O is not set CONFIG_NETDEVICES=y -CONFIG_COMPAT_NET_DEV_OPS=y # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set @@ -635,6 +644,7 @@ CONFIG_NET_PCI=y # CONFIG_SMSC9420 is not set # CONFIG_SUNDANCE is not set # CONFIG_TLAN is not set +# CONFIG_KS8842 is not set # CONFIG_VIA_RHINE is not set # CONFIG_SC92031 is not set # CONFIG_ATL2 is not set @@ -1127,6 +1137,11 @@ CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set CONFIG_SND_VMASTER=y +CONFIG_SND_RAWMIDI_SEQ=m +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_MPU401_UART=m CONFIG_SND_AC97_CODEC=m CONFIG_SND_DRIVERS=y @@ -1153,6 +1168,7 @@ CONFIG_SND_ALI5451=m # CONFIG_SND_OXYGEN is not set # CONFIG_SND_CS4281 is not set # CONFIG_SND_CS46XX is not set +# CONFIG_SND_CTXFI is not set # CONFIG_SND_DARLA20 is not set # CONFIG_SND_GINA20 is not set # CONFIG_SND_LAYLA20 is not set @@ -1183,6 +1199,7 @@ CONFIG_SND_ALI5451=m # CONFIG_SND_INTEL8X0 is not set # CONFIG_SND_INTEL8X0M is not set # CONFIG_SND_KORG1212 is not set +# CONFIG_SND_LX6464ES is not set # CONFIG_SND_MAESTRO3 is not set # CONFIG_SND_MIXART is not set # CONFIG_SND_NM256 is not set @@ -1229,6 +1246,7 @@ CONFIG_HID_BELKIN=y CONFIG_HID_CHERRY=y CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y # CONFIG_DRAGONRISE_FF is not set CONFIG_HID_EZKEY=y CONFIG_HID_KYE=y @@ -1246,9 +1264,14 @@ CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y # CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=y +# CONFIG_SMARTJOYPLUS_FF is not set CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y # CONFIG_THRUSTMASTER_FF is not set +CONFIG_HID_ZEROPLUS=y # CONFIG_ZEROPLUS_FF is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y @@ -1462,6 +1485,7 @@ CONFIG_FILE_LOCKING=y # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set # CONFIG_BTRFS_FS is not set +CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1636,25 +1660,28 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y # CONFIG_DEBUG_PAGEALLOC is not set CONFIG_NOP_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y CONFIG_TRACING_SUPPORT=y - -# -# Tracers -# +CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not 
set # CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_EVENT_TRACER is not set # CONFIG_BOOT_TRACER is not set -# CONFIG_TRACE_BRANCH_PROFILING is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set # CONFIG_STACK_TRACER is not set # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set # CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index a11b89ee9ef..926397d345f 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h @@ -6,9 +6,6 @@ #ifndef _SPARC64_CPUDATA_H #define _SPARC64_CPUDATA_H -#include <asm/hypervisor.h> -#include <asm/asi.h> - #ifndef __ASSEMBLY__ #include <linux/percpu.h> @@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu)) #define local_cpu_data() __get_cpu_var(__cpu_data) -/* Trap handling code needs to get at a few critical values upon - * trap entry and to process TSB misses. These cannot be in the - * per_cpu() area as we really need to lock them into the TLB and - * thus make them part of the main kernel image. As a result we - * try to make this as small as possible. - * - * This is padded out and aligned to 64-bytes to avoid false sharing - * on SMP. - */ - -/* If you modify the size of this structure, please update - * TRAP_BLOCK_SZ_SHIFT below. - */ -struct thread_info; -struct trap_per_cpu { -/* D-cache line 1: Basic thread information, cpu and device mondo queues */ - struct thread_info *thread; - unsigned long pgd_paddr; - unsigned long cpu_mondo_pa; - unsigned long dev_mondo_pa; - -/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */ - unsigned long resum_mondo_pa; - unsigned long resum_kernel_buf_pa; - unsigned long nonresum_mondo_pa; - unsigned long nonresum_kernel_buf_pa; - -/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */ - struct hv_fault_status fault_info; - -/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ - unsigned long cpu_mondo_block_pa; - unsigned long cpu_list_pa; - unsigned long tsb_huge; - unsigned long tsb_huge_temp; - -/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. 
*/ - unsigned long irq_worklist_pa; - unsigned int cpu_mondo_qmask; - unsigned int dev_mondo_qmask; - unsigned int resum_qmask; - unsigned int nonresum_qmask; - void *hdesc; -} __attribute__((aligned(64))); -extern struct trap_per_cpu trap_block[NR_CPUS]; -extern void init_cur_cpu_trap(struct thread_info *); -extern void setup_tba(void); -extern int ncpus_probed; extern const struct seq_operations cpuinfo_op; -extern unsigned long real_hard_smp_processor_id(void); - -struct cpuid_patch_entry { - unsigned int addr; - unsigned int cheetah_safari[4]; - unsigned int cheetah_jbus[4]; - unsigned int starfire[4]; - unsigned int sun4v[4]; -}; -extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; - -struct sun4v_1insn_patch_entry { - unsigned int addr; - unsigned int insn; -}; -extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, - __sun4v_1insn_patch_end; - -struct sun4v_2insn_patch_entry { - unsigned int addr; - unsigned int insns[2]; -}; -extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, - __sun4v_2insn_patch_end; - #endif /* !(__ASSEMBLY__) */ -#define TRAP_PER_CPU_THREAD 0x00 -#define TRAP_PER_CPU_PGD_PADDR 0x08 -#define TRAP_PER_CPU_CPU_MONDO_PA 0x10 -#define TRAP_PER_CPU_DEV_MONDO_PA 0x18 -#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20 -#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28 -#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 -#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 -#define TRAP_PER_CPU_FAULT_INFO 0x40 -#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 -#define TRAP_PER_CPU_CPU_LIST_PA 0xc8 -#define TRAP_PER_CPU_TSB_HUGE 0xd0 -#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 -#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0 -#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8 -#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec -#define TRAP_PER_CPU_RESUM_QMASK 0xf0 -#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4 - -#define TRAP_BLOCK_SZ_SHIFT 8 - -#include <asm/scratchpad.h> - -#define __GET_CPUID(REG) \ - /* Spitfire implementation (default). */ \ -661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ - srlx REG, 17, REG; \ - and REG, 0x1f, REG; \ - nop; \ - .section .cpuid_patch, "ax"; \ - /* Instruction location. */ \ - .word 661b; \ - /* Cheetah Safari implementation. */ \ - ldxa [%g0] ASI_SAFARI_CONFIG, REG; \ - srlx REG, 17, REG; \ - and REG, 0x3ff, REG; \ - nop; \ - /* Cheetah JBUS implementation. */ \ - ldxa [%g0] ASI_JBUS_CONFIG, REG; \ - srlx REG, 17, REG; \ - and REG, 0x1f, REG; \ - nop; \ - /* Starfire implementation. */ \ - sethi %hi(0x1fff40000d0 >> 9), REG; \ - sllx REG, 9, REG; \ - or REG, 0xd0, REG; \ - lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ - /* sun4v implementation. */ \ - mov SCRATCHPAD_CPUID, REG; \ - ldxa [REG] ASI_SCRATCHPAD, REG; \ - nop; \ - nop; \ - .previous; - -#ifdef CONFIG_SMP - -#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - __GET_CPUID(TMP) \ - sethi %hi(trap_block), DEST; \ - sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ - or DEST, %lo(trap_block), DEST; \ - add DEST, TMP, DEST; \ - -/* Clobbers TMP, current address space PGD phys address into DEST. */ -#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ - TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; - -/* Clobbers TMP, loads local processor's IRQ work area into DEST. */ -#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ - TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; - -/* Clobbers TMP, loads DEST with current thread info pointer. 
*/ -#define TRAP_LOAD_THREAD_REG(DEST, TMP) \ - TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - ldx [DEST + TRAP_PER_CPU_THREAD], DEST; - -/* Given the current thread info pointer in THR, load the per-cpu - * area base of the current processor into DEST. REG1, REG2, and REG3 are - * clobbered. - * - * You absolutely cannot use DEST as a temporary in this code. The - * reason is that traps can happen during execution, and return from - * trap will load the fully resolved DEST per-cpu base. This can corrupt - * the calculations done by the macro mid-stream. - */ -#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ - lduh [THR + TI_CPU], REG1; \ - sethi %hi(__per_cpu_shift), REG3; \ - sethi %hi(__per_cpu_base), REG2; \ - ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ - ldx [REG2 + %lo(__per_cpu_base)], REG2; \ - sllx REG1, REG3, REG3; \ - add REG3, REG2, DEST; - -#else - -#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - sethi %hi(trap_block), DEST; \ - or DEST, %lo(trap_block), DEST; \ - -/* Uniprocessor versions, we know the cpuid is zero. */ -#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ - TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; - -/* Clobbers TMP, loads local processor's IRQ work area into DEST. */ -#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ - TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; - -#define TRAP_LOAD_THREAD_REG(DEST, TMP) \ - TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ - ldx [DEST + TRAP_PER_CPU_THREAD], DEST; - -/* No per-cpu areas on uniprocessor, so no need to load DEST. */ -#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) - -#endif /* !(CONFIG_SMP) */ +#include <asm/trap_block.h> #endif /* _SPARC64_CPUDATA_H */ diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 0f4150e2661..204e4bf6443 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -1,8 +1,166 @@ #ifndef ___ASM_SPARC_DMA_MAPPING_H #define ___ASM_SPARC_DMA_MAPPING_H -#if defined(__sparc__) && defined(__arch64__) -#include <asm/dma-mapping_64.h> -#else -#include <asm/dma-mapping_32.h> -#endif + +#include <linux/scatterlist.h> +#include <linux/mm.h> + +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + +extern int dma_supported(struct device *dev, u64 mask); +extern int dma_set_mask(struct device *dev, u64 dma_mask); + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#define dma_is_consistent(d, h) (1) + +struct dma_ops { + void *(*alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + void (*free_coherent)(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle); + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction); + void (*unmap_page)(struct device *dev, dma_addr_t dma_addr, + size_t size, + enum dma_data_direction direction); + int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction); + void (*unmap_sg)(struct device *dev, struct scatterlist *sg, + int nhwentries, + enum dma_data_direction direction); + void (*sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction); + void (*sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction); + void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist 
*sg, + int nelems, + enum dma_data_direction direction); + void (*sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); +}; +extern const struct dma_ops *dma_ops; + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + return dma_ops->alloc_coherent(dev, size, dma_handle, flag); +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); +} + +static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction) +{ + return dma_ops->map_page(dev, virt_to_page(cpu_addr), + (unsigned long)cpu_addr & ~PAGE_MASK, size, + direction); +} + +static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, + enum dma_data_direction direction) +{ + dma_ops->unmap_page(dev, dma_addr, size, direction); +} + +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + return dma_ops->map_page(dev, page, offset, size, direction); +} + +static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, + enum dma_data_direction direction) +{ + dma_ops->unmap_page(dev, dma_address, size, direction); +} + +static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) +{ + return dma_ops->map_sg(dev, sg, nents, direction); +} + +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) +{ + dma_ops->unmap_sg(dev, sg, nents, direction); +} + +static inline void dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) +{ + dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); +} + +static inline void dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction) +{ + if (dma_ops->sync_single_for_device) + dma_ops->sync_single_for_device(dev, dma_handle, size, + direction); +} + +static inline void dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); +} + +static inline void dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + if (dma_ops->sync_sg_for_device) + dma_ops->sync_sg_for_device(dev, sg, nelems, direction); +} + +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); +} + +static inline void dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + enum dma_data_direction dir) +{ + dma_sync_single_for_device(dev, dma_handle+offset, size, dir); +} + + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return (dma_addr == DMA_ERROR_CODE); +} + +static inline int dma_get_cache_alignment(void) +{ + /* + * no easy way to get cache size on all processors, so return + * the maximum possible, to be safe + */ + return (1 << INTERNODE_CACHE_SHIFT); +} + #endif diff --git 
a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h deleted file mode 100644 index 8a57ea0573e..00000000000 --- a/arch/sparc/include/asm/dma-mapping_32.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef _ASM_SPARC_DMA_MAPPING_H -#define _ASM_SPARC_DMA_MAPPING_H - -#include <linux/types.h> - -struct device; -struct scatterlist; -struct page; - -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - -extern int dma_supported(struct device *dev, u64 mask); -extern int dma_set_mask(struct device *dev, u64 dma_mask); -extern void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); -extern void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle); -extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, - enum dma_data_direction direction); -extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction); -extern dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction); -extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction); -extern int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction); -extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction); -extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction); -extern void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction); -extern void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction); -extern void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction); -extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction direction); -extern void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction); -extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr); -extern int dma_get_cache_alignment(void); - -#define dma_alloc_noncoherent dma_alloc_coherent -#define dma_free_noncoherent dma_free_coherent - -#endif /* _ASM_SPARC_DMA_MAPPING_H */ diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h deleted file mode 100644 index bfa64f9702d..00000000000 --- a/arch/sparc/include/asm/dma-mapping_64.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef _ASM_SPARC64_DMA_MAPPING_H -#define _ASM_SPARC64_DMA_MAPPING_H - -#include <linux/scatterlist.h> -#include <linux/mm.h> - -#define DMA_ERROR_CODE (~(dma_addr_t)0x0) - -struct dma_ops { - void *(*alloc_coherent)(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - void (*free_coherent)(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle); - dma_addr_t (*map_single)(struct device *dev, void *cpu_addr, - size_t size, - enum dma_data_direction direction); - void (*unmap_single)(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, - enum 
dma_data_direction direction); - void (*unmap_sg)(struct device *dev, struct scatterlist *sg, - int nhwentries, - enum dma_data_direction direction); - void (*sync_single_for_cpu)(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction); - void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, - int nelems, - enum dma_data_direction direction); -}; -extern const struct dma_ops *dma_ops; - -extern int dma_supported(struct device *dev, u64 mask); -extern int dma_set_mask(struct device *dev, u64 dma_mask); - -static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); -} - -static inline void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); -} - -static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, - enum dma_data_direction direction) -{ - return dma_ops->map_single(dev, cpu_addr, size, direction); -} - -static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction) -{ - dma_ops->unmap_single(dev, dma_addr, size, direction); -} - -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - return dma_ops->map_single(dev, page_address(page) + offset, - size, direction); -} - -static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, - enum dma_data_direction direction) -{ - dma_ops->unmap_single(dev, dma_address, size, direction); -} - -static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ - return dma_ops->map_sg(dev, sg, nents, direction); -} - -static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ - dma_ops->unmap_sg(dev, sg, nents, direction); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction) -{ - /* No flushing needed to sync cpu writes to the device. */ -} - -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - /* No flushing needed to sync cpu writes to the device. */ -} - - -static inline void dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); -} - -static inline void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - /* No flushing needed to sync cpu writes to the device. 
*/ -} - -static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return (dma_addr == DMA_ERROR_CODE); -} - -static inline int dma_get_cache_alignment(void) -{ - /* no easy way to get cache size on all processors, so return - * the maximum possible, to be safe */ - return (1 << INTERNODE_CACHE_SHIFT); -} - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -#define dma_is_consistent(d, h) (1) - -#endif /* _ASM_SPARC64_DMA_MAPPING_H */ diff --git a/arch/sparc/include/asm/errno.h b/arch/sparc/include/asm/errno.h index a9ef172977d..4e2bc490d71 100644 --- a/arch/sparc/include/asm/errno.h +++ b/arch/sparc/include/asm/errno.h @@ -110,4 +110,6 @@ #define EOWNERDEAD 132 /* Owner died */ #define ENOTRECOVERABLE 133 /* State not recoverable */ +#define ERFKILL 134 /* Operation not possible due to RF-kill */ + #endif diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h index d27716cd38c..b0f18e9893d 100644 --- a/arch/sparc/include/asm/ftrace.h +++ b/arch/sparc/include/asm/ftrace.h @@ -11,4 +11,15 @@ extern void _mcount(void); #endif +#ifdef CONFIG_DYNAMIC_FTRACE +/* relocation of the mcount call site is the same as the address */ +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct dyn_arch_ftrace { +}; +#endif /* CONFIG_DYNAMIC_FTRACE */ + #endif /* _ASM_SPARC64_FTRACE */ diff --git a/arch/sparc/include/asm/kmap_types.h b/arch/sparc/include/asm/kmap_types.h index 602f5e034f7..aad21745fbb 100644 --- a/arch/sparc/include/asm/kmap_types.h +++ b/arch/sparc/include/asm/kmap_types.h @@ -5,21 +5,6 @@ * is actually used on sparc. -DaveM */ -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h index 1acc7272e53..9faa046713f 100644 --- a/arch/sparc/include/asm/mdesc.h +++ b/arch/sparc/include/asm/mdesc.h @@ -71,7 +71,8 @@ struct mdesc_notifier_client { extern void mdesc_register_notifier(struct mdesc_notifier_client *client); -extern void mdesc_fill_in_cpu_data(cpumask_t mask); +extern void mdesc_fill_in_cpu_data(cpumask_t *mask); +extern void mdesc_populate_present_mask(cpumask_t *mask); extern void sun4v_mdesc_init(void); diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h index bee64593023..007aafb4ae9 100644 --- a/arch/sparc/include/asm/percpu_64.h +++ b/arch/sparc/include/asm/percpu_64.h @@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5"); #ifdef CONFIG_SMP -extern void real_setup_per_cpu_areas(void); +#include <asm/trap_block.h> -extern unsigned long __per_cpu_base; -extern unsigned long __per_cpu_shift; #define __per_cpu_offset(__cpu) \ - (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) + (trap_block[(__cpu)].__per_cpu_base) #define per_cpu_offset(x) (__per_cpu_offset(x)) #define __my_cpu_offset __local_per_cpu_offset #else /* !
SMP */ -#define real_setup_per_cpu_areas() do { } while (0) - #endif /* SMP */ #include <asm-generic/percpu.h> diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index 900d44714f8..be8d7aaeb60 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp); #endif extern void prom_build_devicetree(void); +extern void of_populate_present_mask(void); +extern void of_fill_in_cpu_data(void); /* Dummy ref counting routines - to be implemented later */ static inline struct device_node *of_node_get(struct device_node *node) diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h new file mode 100644 index 00000000000..7e26b2db621 --- /dev/null +++ b/arch/sparc/include/asm/trap_block.h @@ -0,0 +1,207 @@ +#ifndef _SPARC_TRAP_BLOCK_H +#define _SPARC_TRAP_BLOCK_H + +#include <asm/hypervisor.h> +#include <asm/asi.h> + +#ifndef __ASSEMBLY__ + +/* Trap handling code needs to get at a few critical values upon + * trap entry and to process TSB misses. These cannot be in the + * per_cpu() area as we really need to lock them into the TLB and + * thus make them part of the main kernel image. As a result we + * try to make this as small as possible. + * + * This is padded out and aligned to 64-bytes to avoid false sharing + * on SMP. + */ + +/* If you modify the size of this structure, please update + * TRAP_BLOCK_SZ_SHIFT below. + */ +struct thread_info; +struct trap_per_cpu { +/* D-cache line 1: Basic thread information, cpu and device mondo queues */ + struct thread_info *thread; + unsigned long pgd_paddr; + unsigned long cpu_mondo_pa; + unsigned long dev_mondo_pa; + +/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */ + unsigned long resum_mondo_pa; + unsigned long resum_kernel_buf_pa; + unsigned long nonresum_mondo_pa; + unsigned long nonresum_kernel_buf_pa; + +/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */ + struct hv_fault_status fault_info; + +/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */ + unsigned long cpu_mondo_block_pa; + unsigned long cpu_list_pa; + unsigned long tsb_huge; + unsigned long tsb_huge_temp; + +/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. 
*/ + unsigned long irq_worklist_pa; + unsigned int cpu_mondo_qmask; + unsigned int dev_mondo_qmask; + unsigned int resum_qmask; + unsigned int nonresum_qmask; + unsigned long __per_cpu_base; +} __attribute__((aligned(64))); +extern struct trap_per_cpu trap_block[NR_CPUS]; +extern void init_cur_cpu_trap(struct thread_info *); +extern void setup_tba(void); +extern int ncpus_probed; + +extern unsigned long real_hard_smp_processor_id(void); + +struct cpuid_patch_entry { + unsigned int addr; + unsigned int cheetah_safari[4]; + unsigned int cheetah_jbus[4]; + unsigned int starfire[4]; + unsigned int sun4v[4]; +}; +extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; + +struct sun4v_1insn_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, + __sun4v_1insn_patch_end; + +struct sun4v_2insn_patch_entry { + unsigned int addr; + unsigned int insns[2]; +}; +extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, + __sun4v_2insn_patch_end; + + +#endif /* !(__ASSEMBLY__) */ + +#define TRAP_PER_CPU_THREAD 0x00 +#define TRAP_PER_CPU_PGD_PADDR 0x08 +#define TRAP_PER_CPU_CPU_MONDO_PA 0x10 +#define TRAP_PER_CPU_DEV_MONDO_PA 0x18 +#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20 +#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28 +#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30 +#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38 +#define TRAP_PER_CPU_FAULT_INFO 0x40 +#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0 +#define TRAP_PER_CPU_CPU_LIST_PA 0xc8 +#define TRAP_PER_CPU_TSB_HUGE 0xd0 +#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 +#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0 +#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8 +#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec +#define TRAP_PER_CPU_RESUM_QMASK 0xf0 +#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4 +#define TRAP_PER_CPU_PER_CPU_BASE 0xf8 + +#define TRAP_BLOCK_SZ_SHIFT 8 + +#include <asm/scratchpad.h> + +#define __GET_CPUID(REG) \ + /* Spitfire implementation (default). */ \ +661: ldxa [%g0] ASI_UPA_CONFIG, REG; \ + srlx REG, 17, REG; \ + and REG, 0x1f, REG; \ + nop; \ + .section .cpuid_patch, "ax"; \ + /* Instruction location. */ \ + .word 661b; \ + /* Cheetah Safari implementation. */ \ + ldxa [%g0] ASI_SAFARI_CONFIG, REG; \ + srlx REG, 17, REG; \ + and REG, 0x3ff, REG; \ + nop; \ + /* Cheetah JBUS implementation. */ \ + ldxa [%g0] ASI_JBUS_CONFIG, REG; \ + srlx REG, 17, REG; \ + and REG, 0x1f, REG; \ + nop; \ + /* Starfire implementation. */ \ + sethi %hi(0x1fff40000d0 >> 9), REG; \ + sllx REG, 9, REG; \ + or REG, 0xd0, REG; \ + lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\ + /* sun4v implementation. */ \ + mov SCRATCHPAD_CPUID, REG; \ + ldxa [REG] ASI_SCRATCHPAD, REG; \ + nop; \ + nop; \ + .previous; + +#ifdef CONFIG_SMP + +#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + __GET_CPUID(TMP) \ + sethi %hi(trap_block), DEST; \ + sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \ + or DEST, %lo(trap_block), DEST; \ + add DEST, TMP, DEST; \ + +/* Clobbers TMP, current address space PGD phys address into DEST. */ +#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; + +/* Clobbers TMP, loads local processor's IRQ work area into DEST. */ +#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; + +/* Clobbers TMP, loads DEST with current thread info pointer. 
*/ +#define TRAP_LOAD_THREAD_REG(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + ldx [DEST + TRAP_PER_CPU_THREAD], DEST; + +/* Given the current thread info pointer in THR, load the per-cpu + * area base of the current processor into DEST. REG1, REG2, and REG3 are + * clobbered. + * + * You absolutely cannot use DEST as a temporary in this code. The + * reason is that traps can happen during execution, and return from + * trap will load the fully resolved DEST per-cpu base. This can corrupt + * the calculations done by the macro mid-stream. + */ +#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ + lduh [THR + TI_CPU], REG1; \ + sethi %hi(trap_block), REG2; \ + sllx REG1, TRAP_BLOCK_SZ_SHIFT, REG1; \ + or REG2, %lo(trap_block), REG2; \ + add REG2, REG1, REG2; \ + ldx [REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST; + +#else + +#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + sethi %hi(trap_block), DEST; \ + or DEST, %lo(trap_block), DEST; \ + +/* Uniprocessor versions, we know the cpuid is zero. */ +#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; + +/* Clobbers TMP, loads local processor's IRQ work area into DEST. */ +#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; + +#define TRAP_LOAD_THREAD_REG(DEST, TMP) \ + TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ + ldx [DEST + TRAP_PER_CPU_THREAD], DEST; + +/* No per-cpu areas on uniprocessor, so no need to load DEST. */ +#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) + +#endif /* !(CONFIG_SMP) */ + +#endif /* _SPARC_TRAP_BLOCK_H */ diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index b8eb71ef316..b2c406de7d4 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -394,8 +394,9 @@ #define __NR_accept4 323 #define __NR_preadv 324 #define __NR_pwritev 325 +#define __NR_rt_tgsigqueueinfo 326 -#define NR_SYSCALLS 326 +#define NR_SYSCALLS 327 #ifdef __32bit_syscall_numbers__ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 54742e58831..475ce4696ac 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -37,6 +37,7 @@ obj-y += una_asm_$(BITS).o obj-$(CONFIG_SPARC32) += muldiv.o obj-y += prom_common.o obj-y += prom_$(BITS).o +obj-y += of_device_common.o obj-y += of_device_$(BITS).o obj-$(CONFIG_SPARC64) += prom_irqtrans.o @@ -54,6 +55,7 @@ obj-$(CONFIG_SPARC64) += sstate.o obj-$(CONFIG_SPARC64) += mdesc.o obj-$(CONFIG_SPARC64) += pcr.o obj-$(CONFIG_SPARC64) += nmi.o +obj-$(CONFIG_SPARC64_SMP) += cpumap.o # sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation obj-$(CONFIG_SPARC32) += devres.o diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c new file mode 100644 index 00000000000..7430ed080b2 --- /dev/null +++ b/arch/sparc/kernel/cpumap.c @@ -0,0 +1,431 @@ +/* cpumap.c: used for optimizing CPU assignment + * + * Copyright (C) 2009 Hong H. 
Pham <hong.pham@windriver.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/cpumask.h> +#include <linux/spinlock.h> +#include <asm/cpudata.h> +#include "cpumap.h" + + +enum { + CPUINFO_LVL_ROOT = 0, + CPUINFO_LVL_NODE, + CPUINFO_LVL_CORE, + CPUINFO_LVL_PROC, + CPUINFO_LVL_MAX, +}; + +enum { + ROVER_NO_OP = 0, + /* Increment rover every time level is visited */ + ROVER_INC_ON_VISIT = 1 << 0, + /* Increment parent's rover every time rover wraps around */ + ROVER_INC_PARENT_ON_LOOP = 1 << 1, +}; + +struct cpuinfo_node { + int id; + int level; + int num_cpus; /* Number of CPUs in this hierarchy */ + int parent_index; + int child_start; /* Array index of the first child node */ + int child_end; /* Array index of the last child node */ + int rover; /* Child node iterator */ +}; + +struct cpuinfo_level { + int start_index; /* Index of first node of a level in a cpuinfo tree */ + int end_index; /* Index of last node of a level in a cpuinfo tree */ + int num_nodes; /* Number of nodes in a level in a cpuinfo tree */ +}; + +struct cpuinfo_tree { + int total_nodes; + + /* Offsets into nodes[] for each level of the tree */ + struct cpuinfo_level level[CPUINFO_LVL_MAX]; + struct cpuinfo_node nodes[0]; +}; + + +static struct cpuinfo_tree *cpuinfo_tree; + +static u16 cpu_distribution_map[NR_CPUS]; +static DEFINE_SPINLOCK(cpu_map_lock); + + +/* Niagara optimized cpuinfo tree traversal. */ +static const int niagara_iterate_method[] = { + [CPUINFO_LVL_ROOT] = ROVER_NO_OP, + + /* Strands (or virtual CPUs) within a core may not run concurrently + * on the Niagara, as instruction pipeline(s) are shared. Distribute + * work to strands in different cores first for better concurrency. + * Go to next NUMA node when all cores are used. + */ + [CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP, + + /* Strands are grouped together by proc_id in cpuinfo_sparc, i.e. + * a proc_id represents an instruction pipeline. Distribute work to + * strands in different proc_id groups if the core has multiple + * instruction pipelines (e.g. the Niagara 2/2+ has two). + */ + [CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT, + + /* Pick the next strand in the proc_id group. */ + [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT, +}; + +/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA + * nodes. + */ +static const int generic_iterate_method[] = { + [CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT, + [CPUINFO_LVL_NODE] = ROVER_NO_OP, + [CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP, + [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP, +}; + + +static int cpuinfo_id(int cpu, int level) +{ + int id; + + switch (level) { + case CPUINFO_LVL_ROOT: + id = 0; + break; + case CPUINFO_LVL_NODE: + id = cpu_to_node(cpu); + break; + case CPUINFO_LVL_CORE: + id = cpu_data(cpu).core_id; + break; + case CPUINFO_LVL_PROC: + id = cpu_data(cpu).proc_id; + break; + default: + id = -EINVAL; + } + return id; +} + +/* + * Enumerate the CPU information in __cpu_data to determine the start index, + * end index, and number of nodes for each level in the cpuinfo tree. The + * total number of cpuinfo nodes required to build the tree is returned. 
+ */ +static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level) +{ + int prev_id[CPUINFO_LVL_MAX]; + int i, n, num_nodes; + + for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) { + struct cpuinfo_level *lv = &tree_level[i]; + + prev_id[i] = -1; + lv->start_index = lv->end_index = lv->num_nodes = 0; + } + + num_nodes = 1; /* Include the root node */ + + for (i = 0; i < num_possible_cpus(); i++) { + if (!cpu_online(i)) + continue; + + n = cpuinfo_id(i, CPUINFO_LVL_NODE); + if (n > prev_id[CPUINFO_LVL_NODE]) { + tree_level[CPUINFO_LVL_NODE].num_nodes++; + prev_id[CPUINFO_LVL_NODE] = n; + num_nodes++; + } + n = cpuinfo_id(i, CPUINFO_LVL_CORE); + if (n > prev_id[CPUINFO_LVL_CORE]) { + tree_level[CPUINFO_LVL_CORE].num_nodes++; + prev_id[CPUINFO_LVL_CORE] = n; + num_nodes++; + } + n = cpuinfo_id(i, CPUINFO_LVL_PROC); + if (n > prev_id[CPUINFO_LVL_PROC]) { + tree_level[CPUINFO_LVL_PROC].num_nodes++; + prev_id[CPUINFO_LVL_PROC] = n; + num_nodes++; + } + } + + tree_level[CPUINFO_LVL_ROOT].num_nodes = 1; + + n = tree_level[CPUINFO_LVL_NODE].num_nodes; + tree_level[CPUINFO_LVL_NODE].start_index = 1; + tree_level[CPUINFO_LVL_NODE].end_index = n; + + n++; + tree_level[CPUINFO_LVL_CORE].start_index = n; + n += tree_level[CPUINFO_LVL_CORE].num_nodes; + tree_level[CPUINFO_LVL_CORE].end_index = n - 1; + + tree_level[CPUINFO_LVL_PROC].start_index = n; + n += tree_level[CPUINFO_LVL_PROC].num_nodes; + tree_level[CPUINFO_LVL_PROC].end_index = n - 1; + + return num_nodes; +} + +/* Build a tree representation of the CPU hierarchy using the per CPU + * information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are + * assumed to be sorted in ascending order based on node, core_id, and + * proc_id (in order of significance). + */ +static struct cpuinfo_tree *build_cpuinfo_tree(void) +{ + struct cpuinfo_tree *new_tree; + struct cpuinfo_node *node; + struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX]; + int num_cpus[CPUINFO_LVL_MAX]; + int level_rover[CPUINFO_LVL_MAX]; + int prev_id[CPUINFO_LVL_MAX]; + int n, id, cpu, prev_cpu, last_cpu, level; + + n = enumerate_cpuinfo_nodes(tmp_level); + + new_tree = kzalloc(sizeof(struct cpuinfo_tree) + + (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC); + if (!new_tree) + return NULL; + + new_tree->total_nodes = n; + memcpy(&new_tree->level, tmp_level, sizeof(tmp_level)); + + prev_cpu = cpu = first_cpu(cpu_online_map); + + /* Initialize all levels in the tree with the first CPU */ + for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) { + n = new_tree->level[level].start_index; + + level_rover[level] = n; + node = &new_tree->nodes[n]; + + id = cpuinfo_id(cpu, level); + if (unlikely(id < 0)) { + kfree(new_tree); + return NULL; + } + node->id = id; + node->level = level; + node->num_cpus = 1; + + node->parent_index = (level > CPUINFO_LVL_ROOT) + ? new_tree->level[level - 1].start_index : -1; + + node->child_start = node->child_end = node->rover = + (level == CPUINFO_LVL_PROC) + ? 
cpu : new_tree->level[level + 1].start_index; + + prev_id[level] = node->id; + num_cpus[level] = 1; + } + + for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) { + if (cpu_online(last_cpu)) + break; + } + + while (++cpu <= last_cpu) { + if (!cpu_online(cpu)) + continue; + + for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; + level--) { + id = cpuinfo_id(cpu, level); + if (unlikely(id < 0)) { + kfree(new_tree); + return NULL; + } + + if ((id != prev_id[level]) || (cpu == last_cpu)) { + prev_id[level] = id; + node = &new_tree->nodes[level_rover[level]]; + node->num_cpus = num_cpus[level]; + num_cpus[level] = 1; + + if (cpu == last_cpu) + node->num_cpus++; + + /* Connect tree node to parent */ + if (level == CPUINFO_LVL_ROOT) + node->parent_index = -1; + else + node->parent_index = + level_rover[level - 1]; + + if (level == CPUINFO_LVL_PROC) { + node->child_end = + (cpu == last_cpu) ? cpu : prev_cpu; + } else { + node->child_end = + level_rover[level + 1] - 1; + } + + /* Initialize the next node in the same level */ + n = ++level_rover[level]; + if (n <= new_tree->level[level].end_index) { + node = &new_tree->nodes[n]; + node->id = id; + node->level = level; + + /* Connect node to child */ + node->child_start = node->child_end = + node->rover = + (level == CPUINFO_LVL_PROC) + ? cpu : level_rover[level + 1]; + } + } else + num_cpus[level]++; + } + prev_cpu = cpu; + } + + return new_tree; +} + +static void increment_rover(struct cpuinfo_tree *t, int node_index, + int root_index, const int *rover_inc_table) +{ + struct cpuinfo_node *node = &t->nodes[node_index]; + int top_level, level; + + top_level = t->nodes[root_index].level; + for (level = node->level; level >= top_level; level--) { + node->rover++; + if (node->rover <= node->child_end) + return; + + node->rover = node->child_start; + /* If parent's rover does not need to be adjusted, stop here. */ + if ((level == top_level) || + !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP)) + return; + + node = &t->nodes[node->parent_index]; + } +} + +static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) +{ + const int *rover_inc_table; + int level, new_index, index = root_index; + + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + case SUN4V_CHIP_NIAGARA2: + rover_inc_table = niagara_iterate_method; + break; + default: + rover_inc_table = generic_iterate_method; + } + + for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX; + level++) { + new_index = t->nodes[index].rover; + if (rover_inc_table[level] & ROVER_INC_ON_VISIT) + increment_rover(t, index, root_index, rover_inc_table); + + index = new_index; + } + return index; +} + +static void _cpu_map_rebuild(void) +{ + int i; + + if (cpuinfo_tree) { + kfree(cpuinfo_tree); + cpuinfo_tree = NULL; + } + + cpuinfo_tree = build_cpuinfo_tree(); + if (!cpuinfo_tree) + return; + + /* Build CPU distribution map that spans all online CPUs. No need + * to check if the CPU is online, as that is done when the cpuinfo + * tree is being built. + */ + for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++) + cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0); +} + +/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear + * round robin. 
+ */ +static int simple_map_to_cpu(unsigned int index) +{ + int i, end, cpu_rover; + + cpu_rover = 0; + end = index % num_online_cpus(); + for (i = 0; i < num_possible_cpus(); i++) { + if (cpu_online(cpu_rover)) { + if (cpu_rover >= end) + return cpu_rover; + + cpu_rover++; + } + } + + /* Impossible, since num_online_cpus() <= num_possible_cpus() */ + return first_cpu(cpu_online_map); +} + +static int _map_to_cpu(unsigned int index) +{ + struct cpuinfo_node *root_node; + + if (unlikely(!cpuinfo_tree)) { + _cpu_map_rebuild(); + if (!cpuinfo_tree) + return simple_map_to_cpu(index); + } + + root_node = &cpuinfo_tree->nodes[0]; +#ifdef CONFIG_HOTPLUG_CPU + if (unlikely(root_node->num_cpus != num_online_cpus())) { + _cpu_map_rebuild(); + if (!cpuinfo_tree) + return simple_map_to_cpu(index); + } +#endif + return cpu_distribution_map[index % root_node->num_cpus]; +} + +int map_to_cpu(unsigned int index) +{ + int mapped_cpu; + unsigned long flag; + + spin_lock_irqsave(&cpu_map_lock, flag); + mapped_cpu = _map_to_cpu(index); + +#ifdef CONFIG_HOTPLUG_CPU + while (unlikely(!cpu_online(mapped_cpu))) + mapped_cpu = _map_to_cpu(index); +#endif + spin_unlock_irqrestore(&cpu_map_lock, flag); + return mapped_cpu; +} +EXPORT_SYMBOL(map_to_cpu); + +void cpu_map_rebuild(void) +{ + unsigned long flag; + + spin_lock_irqsave(&cpu_map_lock, flag); + _cpu_map_rebuild(); + spin_unlock_irqrestore(&cpu_map_lock, flag); +} diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h new file mode 100644 index 00000000000..e639880ab86 --- /dev/null +++ b/arch/sparc/kernel/cpumap.h @@ -0,0 +1,16 @@ +#ifndef _CPUMAP_H +#define _CPUMAP_H + +#ifdef CONFIG_SMP +extern void cpu_map_rebuild(void); +extern int map_to_cpu(unsigned int index); +#define cpu_map_init() cpu_map_rebuild() +#else +#define cpu_map_init() do {} while (0) +static inline int map_to_cpu(unsigned int index) +{ + return raw_smp_processor_id(); +} +#endif + +#endif diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index ebc8403b035..524c32f97c5 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c @@ -35,8 +35,8 @@ int dma_set_mask(struct device *dev, u64 dma_mask) } EXPORT_SYMBOL(dma_set_mask); -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) +static void *dma32_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) @@ -44,10 +44,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size, #endif return sbus_alloc_consistent(dev, size, dma_handle); } -EXPORT_SYMBOL(dma_alloc_coherent); -void dma_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) +static void dma32_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -58,38 +57,10 @@ void dma_free_coherent(struct device *dev, size_t size, #endif sbus_free_consistent(dev, size, cpu_addr, dma_handle); } -EXPORT_SYMBOL(dma_free_coherent); -dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_map_single(to_pci_dev(dev), cpu_addr, - size, (int)direction); -#endif - return sbus_map_single(dev, cpu_addr, size, (int)direction); -} -EXPORT_SYMBOL(dma_map_single); - -void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, - size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - 
if (dev->bus == &pci_bus_type) { - pci_unmap_single(to_pci_dev(dev), dma_addr, - size, (int)direction); - return; - } -#endif - sbus_unmap_single(dev, dma_addr, size, (int)direction); -} -EXPORT_SYMBOL(dma_unmap_single); - -dma_addr_t dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) +static dma_addr_t dma32_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) @@ -99,10 +70,9 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page, return sbus_map_single(dev, page_address(page) + offset, size, (int)direction); } -EXPORT_SYMBOL(dma_map_page); -void dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction) +static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, + size_t size, enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -113,10 +83,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, #endif sbus_unmap_single(dev, dma_address, size, (int)direction); } -EXPORT_SYMBOL(dma_unmap_page); -int dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) +static int dma32_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) @@ -124,10 +93,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, #endif return sbus_map_sg(dev, sg, nents, direction); } -EXPORT_SYMBOL(dma_map_sg); -void dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) +void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -137,10 +105,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, #endif sbus_unmap_sg(dev, sg, nents, (int)direction); } -EXPORT_SYMBOL(dma_unmap_sg); -void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) +static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, + enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -151,10 +119,10 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, #endif sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); } -EXPORT_SYMBOL(dma_sync_single_for_cpu); -void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, - size_t size, enum dma_data_direction direction) +static void dma32_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -165,28 +133,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, #endif sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); } -EXPORT_SYMBOL(dma_sync_single_for_device); - -void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); -} -EXPORT_SYMBOL(dma_sync_single_range_for_cpu); - -void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned 
long offset, size_t size, - enum dma_data_direction direction) -{ - dma_sync_single_for_device(dev, dma_handle+offset, size, direction); -} -EXPORT_SYMBOL(dma_sync_single_range_for_device); -void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction direction) +static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -197,11 +146,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, #endif BUG(); } -EXPORT_SYMBOL(dma_sync_sg_for_cpu); -void dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) +static void dma32_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, int nelems, + enum dma_data_direction direction) { #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) { @@ -212,16 +160,19 @@ void dma_sync_sg_for_device(struct device *dev, #endif BUG(); } -EXPORT_SYMBOL(dma_sync_sg_for_device); -int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return (dma_addr == DMA_ERROR_CODE); -} -EXPORT_SYMBOL(dma_mapping_error); - -int dma_get_cache_alignment(void) -{ - return 32; -} -EXPORT_SYMBOL(dma_get_cache_alignment); +static const struct dma_ops dma32_dma_ops = { + .alloc_coherent = dma32_alloc_coherent, + .free_coherent = dma32_free_coherent, + .map_page = dma32_map_page, + .unmap_page = dma32_unmap_page, + .map_sg = dma32_map_sg, + .unmap_sg = dma32_unmap_sg, + .sync_single_for_cpu = dma32_sync_single_for_cpu, + .sync_single_for_device = dma32_sync_single_for_device, + .sync_sg_for_cpu = dma32_sync_sg_for_cpu, + .sync_sg_for_device = dma32_sync_sg_for_device, +}; + +const struct dma_ops *dma_ops = &dma32_dma_ops; +EXPORT_SYMBOL(dma_ops); diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 90350f838f0..4a700f4b79c 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -544,7 +544,8 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp, resp_len, ncpus, mask, DR_CPU_STAT_CONFIGURED); - mdesc_fill_in_cpu_data(*mask); + mdesc_populate_present_mask(mask); + mdesc_fill_in_cpu_data(mask); for_each_cpu_mask(cpu, *mask) { int err; diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index d0218e73f98..d3b1a307656 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -7,14 +7,10 @@ #include <asm/ftrace.h> +#ifdef CONFIG_DYNAMIC_FTRACE static const u32 ftrace_nop = 0x01000000; -unsigned char *ftrace_nop_replace(void) -{ - return (char *)&ftrace_nop; -} - -unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) { static u32 call; s32 off; @@ -22,15 +18,11 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) off = ((s32)addr - (s32)ip); call = 0x40000000 | ((u32)off >> 2); - return (unsigned char *) &call; + return call; } -int -ftrace_modify_code(unsigned long ip, unsigned char *old_code, - unsigned char *new_code) +static int ftrace_modify_code(unsigned long ip, u32 old, u32 new) { - u32 old = *(u32 *)old_code; - u32 new = *(u32 *)new_code; u32 replaced; int faulted; @@ -59,18 +51,43 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code, return faulted; } +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + u32 old, new; + + old = ftrace_call_replace(ip, addr); + new = 
ftrace_nop; + return ftrace_modify_code(ip, old, new); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + u32 old, new; + + old = ftrace_nop; + new = ftrace_call_replace(ip, addr); + return ftrace_modify_code(ip, old, new); +} + int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[MCOUNT_INSN_SIZE], *new; + u32 old, new; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); + old = *(u32 *) &ftrace_call; new = ftrace_call_replace(ip, (unsigned long)func); return ftrace_modify_code(ip, old, new); } int __init ftrace_dyn_arch_init(void *data) { - ftrace_mcount_set(data); + unsigned long *p = data; + + *p = 0; + return 0; } +#endif + diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 91bf4c7f79b..f8f21050448 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -641,28 +641,6 @@ tlb_fixup_done: /* Not reached... */ 1: - /* If we boot on a non-zero cpu, all of the per-cpu - * variable references we make before setting up the - * per-cpu areas will use a bogus offset. Put a - * compensating factor into __per_cpu_base to handle - * this cleanly. - * - * What the per-cpu code calculates is: - * - * __per_cpu_base + (cpu << __per_cpu_shift) - * - * These two variables are zero initially, so to - * make it all cancel out to zero we need to put - * "0 - (cpu << 0)" into __per_cpu_base so that the - * above formula evaluates to zero. - * - * We cannot even perform a printk() until this stuff - * is setup as that calls cpu_clock() which uses - * per-cpu variables. - */ - sub %g0, %o0, %o1 - sethi %hi(__per_cpu_base), %o2 - stx %o1, [%o2 + %lo(__per_cpu_base)] #else mov 0, %o0 #endif diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c index f28cb8278e9..28125c5b3d3 100644 --- a/arch/sparc/kernel/init_task.c +++ b/arch/sparc/kernel/init_task.c @@ -10,10 +10,7 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); struct task_struct init_task = INIT_TASK(init_task); - -EXPORT_SYMBOL(init_mm); EXPORT_SYMBOL(init_task); /* .text section in head.S is aligned at 8k boundary and this gets linked diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index d8900e1d5aa..0aeaefe696b 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)cpu, order); } -static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz, - enum dma_data_direction direction) +static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t sz, + enum dma_data_direction direction) { struct iommu *iommu; struct strbuf *strbuf; @@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz, if (unlikely(direction == DMA_NONE)) goto bad_no_ctx; - oaddr = (unsigned long)ptr; + oaddr = (unsigned long)(page_address(page) + offset); npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; @@ -472,8 +473,8 @@ do_flush_sync: vaddr, ctx, npages); } -static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr, - size_t sz, enum dma_data_direction direction) +static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, + size_t sz, enum 
dma_data_direction direction) { struct iommu *iommu; struct strbuf *strbuf; @@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, static const struct dma_ops sun4u_dma_ops = { .alloc_coherent = dma_4u_alloc_coherent, .free_coherent = dma_4u_free_coherent, - .map_single = dma_4u_map_single, - .unmap_single = dma_4u_unmap_single, + .map_page = dma_4u_map_page, + .unmap_page = dma_4u_unmap_page, .map_sg = dma_4u_map_sg, .unmap_sg = dma_4u_unmap_sg, .sync_single_for_cpu = dma_4u_sync_single_for_cpu, diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e5e78f9cfc9..bd075054942 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -45,6 +45,7 @@ #include <asm/cacheflush.h> #include "entry.h" +#include "cpumap.h" #define NUM_IVECS (IMAP_INR + 1) @@ -256,35 +257,13 @@ static int irq_choose_cpu(unsigned int virt_irq) int cpuid; cpumask_copy(&mask, irq_desc[virt_irq].affinity); - if (cpus_equal(mask, CPU_MASK_ALL)) { - static int irq_rover; - static DEFINE_SPINLOCK(irq_rover_lock); - unsigned long flags; - - /* Round-robin distribution... */ - do_round_robin: - spin_lock_irqsave(&irq_rover_lock, flags); - - while (!cpu_online(irq_rover)) { - if (++irq_rover >= nr_cpu_ids) - irq_rover = 0; - } - cpuid = irq_rover; - do { - if (++irq_rover >= nr_cpu_ids) - irq_rover = 0; - } while (!cpu_online(irq_rover)); - - spin_unlock_irqrestore(&irq_rover_lock, flags); + if (cpus_equal(mask, cpu_online_map)) { + cpuid = map_to_cpu(virt_irq); } else { cpumask_t tmp; cpus_and(tmp, cpu_online_map, mask); - - if (cpus_empty(tmp)) - goto do_round_robin; - - cpuid = first_cpu(tmp); + cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp); } return cpuid; diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index f0e6ed23a46..938da19dc06 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -574,7 +574,7 @@ static void __init report_platform_properties(void) mdesc_release(hp); } -static void __devinit fill_in_one_cache(cpuinfo_sparc *c, +static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) { @@ -619,8 +619,7 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c, } } -static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp, - int core_id) +static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) { u64 a; @@ -653,7 +652,7 @@ static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp, } } -static void __devinit set_core_ids(struct mdesc_handle *hp) +static void __cpuinit set_core_ids(struct mdesc_handle *hp) { int idx; u64 mp; @@ -678,8 +677,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp) } } -static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, - int proc_id) +static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) { u64 a; @@ -698,8 +696,7 @@ static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, } } -static void __devinit __set_proc_ids(struct mdesc_handle *hp, - const char *exec_unit_name) +static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) { int idx; u64 mp; @@ -720,13 +717,13 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp, } } -static void __devinit set_proc_ids(struct mdesc_handle *hp) +static void __cpuinit set_proc_ids(struct mdesc_handle *hp) { __set_proc_ids(hp, "exec_unit"); __set_proc_ids(hp, "exec-unit"); } -static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask, 
+static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) { u64 val; @@ -745,7 +742,7 @@ use_default: *mask = ((1U << def) * 64U) - 1U; } -static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, +static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, struct trap_per_cpu *tb) { const u64 *val; @@ -763,23 +760,15 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp, get_one_mondo_bits(val, &tb->nonresum_qmask, 2); } -void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask) +static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) { struct mdesc_handle *hp = mdesc_grab(); + void *ret = NULL; u64 mp; - ncpus_probed = 0; mdesc_for_each_node_by_name(hp, mp, "cpu") { const u64 *id = mdesc_get_property(hp, mp, "id", NULL); - const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); - struct trap_per_cpu *tb; - cpuinfo_sparc *c; - int cpuid; - u64 a; - - ncpus_probed++; - - cpuid = *id; + int cpuid = *id; #ifdef CONFIG_SMP if (cpuid >= NR_CPUS) { @@ -788,62 +777,104 @@ void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask) cpuid, NR_CPUS); continue; } - if (!cpu_isset(cpuid, mask)) + if (!cpu_isset(cpuid, *mask)) continue; -#else - /* On uniprocessor we only want the values for the - * real physical cpu the kernel booted onto, however - * cpu_data() only has one entry at index 0. - */ - if (cpuid != real_hard_smp_processor_id()) - continue; - cpuid = 0; #endif - c = &cpu_data(cpuid); - c->clock_tick = *cfreq; + ret = func(hp, mp, cpuid, arg); + if (ret) + goto out; + } +out: + mdesc_release(hp); + return ret; +} - tb = &trap_block[cpuid]; - get_mondo_data(hp, mp, tb); +static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +{ + ncpus_probed++; +#ifdef CONFIG_SMP + set_cpu_present(cpuid, true); +#endif + return NULL; +} - mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { - u64 j, t = mdesc_arc_target(hp, a); - const char *t_name; +void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) +{ + if (tlb_type != hypervisor) + return; - t_name = mdesc_node_name(hp, t); - if (!strcmp(t_name, "cache")) { - fill_in_one_cache(c, hp, t); - continue; - } + ncpus_probed = 0; + mdesc_iterate_over_cpus(record_one_cpu, NULL, mask); +} - mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { - u64 n = mdesc_arc_target(hp, j); - const char *n_name; +static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +{ + const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); + struct trap_per_cpu *tb; + cpuinfo_sparc *c; + u64 a; - n_name = mdesc_node_name(hp, n); - if (!strcmp(n_name, "cache")) - fill_in_one_cache(c, hp, n); - } +#ifndef CONFIG_SMP + /* On uniprocessor we only want the values for the + * real physical cpu the kernel booted onto, however + * cpu_data() only has one entry at index 0. 
+ */ + if (cpuid != real_hard_smp_processor_id()) + return NULL; + cpuid = 0; +#endif + + c = &cpu_data(cpuid); + c->clock_tick = *cfreq; + + tb = &trap_block[cpuid]; + get_mondo_data(hp, mp, tb); + + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { + u64 j, t = mdesc_arc_target(hp, a); + const char *t_name; + + t_name = mdesc_node_name(hp, t); + if (!strcmp(t_name, "cache")) { + fill_in_one_cache(c, hp, t); + continue; } -#ifdef CONFIG_SMP - cpu_set(cpuid, cpu_present_map); -#endif + mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) { + u64 n = mdesc_arc_target(hp, j); + const char *n_name; - c->core_id = 0; - c->proc_id = -1; + n_name = mdesc_node_name(hp, n); + if (!strcmp(n_name, "cache")) + fill_in_one_cache(c, hp, n); + } } + c->core_id = 0; + c->proc_id = -1; + + return NULL; +} + +void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) +{ + struct mdesc_handle *hp; + + mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask); + #ifdef CONFIG_SMP sparc64_multi_core = 1; #endif + hp = mdesc_grab(); + set_core_ids(hp); set_proc_ids(hp); - smp_fill_in_sib_core_maps(); - mdesc_release(hp); + + smp_fill_in_sib_core_maps(); } static ssize_t mdesc_read(struct file *file, char __user *buf, @@ -887,7 +918,6 @@ void __init sun4v_mdesc_init(void) { struct mdesc_handle *hp; unsigned long len, real_len, status; - cpumask_t mask; (void) sun4v_mach_desc(0UL, 0UL, &len); @@ -911,7 +941,4 @@ void __init sun4v_mdesc_init(void) cur_mdesc = hp; report_platform_properties(); - - cpus_setall(mask); - mdesc_fill_in_cpu_data(mask); } diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index c8f14c1dc52..90396702ea2 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -6,159 +6,11 @@ #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/errno.h> +#include <linux/irq.h> #include <linux/of_device.h> #include <linux/of_platform.h> -static int node_match(struct device *dev, void *data) -{ - struct of_device *op = to_of_device(dev); - struct device_node *dp = data; - - return (op->node == dp); -} - -struct of_device *of_find_device_by_node(struct device_node *dp) -{ - struct device *dev = bus_find_device(&of_platform_bus_type, NULL, - dp, node_match); - - if (dev) - return to_of_device(dev); - - return NULL; -} -EXPORT_SYMBOL(of_find_device_by_node); - -unsigned int irq_of_parse_and_map(struct device_node *node, int index) -{ - struct of_device *op = of_find_device_by_node(node); - - if (!op || index >= op->num_irqs) - return 0; - - return op->irqs[index]; -} -EXPORT_SYMBOL(irq_of_parse_and_map); - -/* Take the archdata values for IOMMU, STC, and HOSTDATA found in - * BUS and propagate to all child of_device objects. 
- */ -void of_propagate_archdata(struct of_device *bus) -{ - struct dev_archdata *bus_sd = &bus->dev.archdata; - struct device_node *bus_dp = bus->node; - struct device_node *dp; - - for (dp = bus_dp->child; dp; dp = dp->sibling) { - struct of_device *op = of_find_device_by_node(dp); - - op->dev.archdata.iommu = bus_sd->iommu; - op->dev.archdata.stc = bus_sd->stc; - op->dev.archdata.host_controller = bus_sd->host_controller; - op->dev.archdata.numa_node = bus_sd->numa_node; - - if (dp->child) - of_propagate_archdata(op); - } -} - -struct bus_type of_platform_bus_type; -EXPORT_SYMBOL(of_platform_bus_type); - -static inline u64 of_read_addr(const u32 *cell, int size) -{ - u64 r = 0; - while (size--) - r = (r << 32) | *(cell++); - return r; -} - -static void __init get_cells(struct device_node *dp, - int *addrc, int *sizec) -{ - if (addrc) - *addrc = of_n_addr_cells(dp); - if (sizec) - *sizec = of_n_size_cells(dp); -} - -/* Max address size we deal with */ -#define OF_MAX_ADDR_CELLS 4 - -struct of_bus { - const char *name; - const char *addr_prop_name; - int (*match)(struct device_node *parent); - void (*count_cells)(struct device_node *child, - int *addrc, int *sizec); - int (*map)(u32 *addr, const u32 *range, - int na, int ns, int pna); - unsigned long (*get_flags)(const u32 *addr, unsigned long); -}; - -/* - * Default translator (generic bus) - */ - -static void of_bus_default_count_cells(struct device_node *dev, - int *addrc, int *sizec) -{ - get_cells(dev, addrc, sizec); -} - -/* Make sure the least significant 64-bits are in-range. Even - * for 3 or 4 cell values it is a good enough approximation. - */ -static int of_out_of_range(const u32 *addr, const u32 *base, - const u32 *size, int na, int ns) -{ - u64 a = of_read_addr(addr, na); - u64 b = of_read_addr(base, na); - - if (a < b) - return 1; - - b += of_read_addr(size, ns); - if (a >= b) - return 1; - - return 0; -} - -static int of_bus_default_map(u32 *addr, const u32 *range, - int na, int ns, int pna) -{ - u32 result[OF_MAX_ADDR_CELLS]; - int i; - - if (ns > 2) { - printk("of_device: Cannot handle size cells (%d) > 2.", ns); - return -EINVAL; - } - - if (of_out_of_range(addr, range, range + na + pna, na, ns)) - return -EINVAL; - - /* Start with the parent range base. */ - memcpy(result, range + na, pna * 4); - - /* Add in the child address offset. */ - for (i = 0; i < na; i++) - result[pna - 1 - i] += - (addr[na - 1 - i] - - range[na - 1 - i]); - - memcpy(addr, result, pna * 4); - - return 0; -} - -static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) -{ - if (flags) - return flags; - return IORESOURCE_MEM; -} +#include "of_device_common.h" /* * PCI bus specific translator @@ -240,47 +92,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) return flags; } -/* - * SBUS bus specific translator - */ - -static int of_bus_sbus_match(struct device_node *np) -{ - struct device_node *dp = np; - - while (dp) { - if (!strcmp(dp->name, "sbus") || - !strcmp(dp->name, "sbi")) - return 1; - - /* Have a look at use_1to1_mapping(). We're trying - * to match SBUS if that's the top-level bus and we - * don't have some intervening real bus that provides - * ranges based translations. 
- */ - if (of_find_property(dp, "ranges", NULL) != NULL) - break; - - dp = dp->parent; - } - - return 0; -} - -static void of_bus_sbus_count_cells(struct device_node *child, - int *addrc, int *sizec) -{ - if (addrc) - *addrc = 2; - if (sizec) - *sizec = 1; -} - -static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna) -{ - return of_bus_default_map(addr, range, na, ns, pna); -} - static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) { return IORESOURCE_MEM; @@ -307,7 +118,7 @@ static struct of_bus of_busses[] = { .addr_prop_name = "reg", .match = of_bus_sbus_match, .count_cells = of_bus_sbus_count_cells, - .map = of_bus_sbus_map, + .map = of_bus_default_map, .get_flags = of_bus_sbus_get_flags, }, /* Default */ diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 5ac287ac03d..881947e59e9 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -10,6 +10,8 @@ #include <linux/of_device.h> #include <linux/of_platform.h> +#include "of_device_common.h" + void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name) { unsigned long ret = res->start + offset; @@ -35,156 +37,6 @@ void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) } EXPORT_SYMBOL(of_iounmap); -static int node_match(struct device *dev, void *data) -{ - struct of_device *op = to_of_device(dev); - struct device_node *dp = data; - - return (op->node == dp); -} - -struct of_device *of_find_device_by_node(struct device_node *dp) -{ - struct device *dev = bus_find_device(&of_platform_bus_type, NULL, - dp, node_match); - - if (dev) - return to_of_device(dev); - - return NULL; -} -EXPORT_SYMBOL(of_find_device_by_node); - -unsigned int irq_of_parse_and_map(struct device_node *node, int index) -{ - struct of_device *op = of_find_device_by_node(node); - - if (!op || index >= op->num_irqs) - return 0; - - return op->irqs[index]; -} -EXPORT_SYMBOL(irq_of_parse_and_map); - -/* Take the archdata values for IOMMU, STC, and HOSTDATA found in - * BUS and propagate to all child of_device objects. 
- */ -void of_propagate_archdata(struct of_device *bus) -{ - struct dev_archdata *bus_sd = &bus->dev.archdata; - struct device_node *bus_dp = bus->node; - struct device_node *dp; - - for (dp = bus_dp->child; dp; dp = dp->sibling) { - struct of_device *op = of_find_device_by_node(dp); - - op->dev.archdata.iommu = bus_sd->iommu; - op->dev.archdata.stc = bus_sd->stc; - op->dev.archdata.host_controller = bus_sd->host_controller; - op->dev.archdata.numa_node = bus_sd->numa_node; - - if (dp->child) - of_propagate_archdata(op); - } -} - -struct bus_type of_platform_bus_type; -EXPORT_SYMBOL(of_platform_bus_type); - -static inline u64 of_read_addr(const u32 *cell, int size) -{ - u64 r = 0; - while (size--) - r = (r << 32) | *(cell++); - return r; -} - -static void get_cells(struct device_node *dp, int *addrc, int *sizec) -{ - if (addrc) - *addrc = of_n_addr_cells(dp); - if (sizec) - *sizec = of_n_size_cells(dp); -} - -/* Max address size we deal with */ -#define OF_MAX_ADDR_CELLS 4 - -struct of_bus { - const char *name; - const char *addr_prop_name; - int (*match)(struct device_node *parent); - void (*count_cells)(struct device_node *child, - int *addrc, int *sizec); - int (*map)(u32 *addr, const u32 *range, - int na, int ns, int pna); - unsigned long (*get_flags)(const u32 *addr, unsigned long); -}; - -/* - * Default translator (generic bus) - */ - -static void of_bus_default_count_cells(struct device_node *dev, - int *addrc, int *sizec) -{ - get_cells(dev, addrc, sizec); -} - -/* Make sure the least significant 64-bits are in-range. Even - * for 3 or 4 cell values it is a good enough approximation. - */ -static int of_out_of_range(const u32 *addr, const u32 *base, - const u32 *size, int na, int ns) -{ - u64 a = of_read_addr(addr, na); - u64 b = of_read_addr(base, na); - - if (a < b) - return 1; - - b += of_read_addr(size, ns); - if (a >= b) - return 1; - - return 0; -} - -static int of_bus_default_map(u32 *addr, const u32 *range, - int na, int ns, int pna) -{ - u32 result[OF_MAX_ADDR_CELLS]; - int i; - - if (ns > 2) { - printk("of_device: Cannot handle size cells (%d) > 2.", ns); - return -EINVAL; - } - - if (of_out_of_range(addr, range, range + na + pna, na, ns)) - return -EINVAL; - - /* Start with the parent range base. */ - memcpy(result, range + na, pna * 4); - - /* Add in the child address offset. */ - for (i = 0; i < na; i++) - result[pna - 1 - i] += - (addr[na - 1 - i] - - range[na - 1 - i]); - - memcpy(addr, result, pna * 4); - - return 0; -} - -static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) -{ - if (flags) - return flags; - return IORESOURCE_MEM; -} - /* * PCI bus specific translator */ @@ -295,42 +147,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags) } /* - * SBUS bus specific translator - */ - -static int of_bus_sbus_match(struct device_node *np) -{ - struct device_node *dp = np; - - while (dp) { - if (!strcmp(dp->name, "sbus") || - !strcmp(dp->name, "sbi")) - return 1; - - /* Have a look at use_1to1_mapping(). We're trying - * to match SBUS if that's the top-level bus and we - * don't have some intervening real bus that provides - * ranges based translations. - */ - if (of_find_property(dp, "ranges", NULL) != NULL) - break; - - dp = dp->parent; - } - - return 0; -} - -static void of_bus_sbus_count_cells(struct device_node *child, - int *addrc, int *sizec) -{ - if (addrc) - *addrc = 2; - if (sizec) - *sizec = 1; -} - -/* * FHC/Central bus specific translator. 
* * This is just needed to hard-code the address and size cell diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c new file mode 100644 index 00000000000..cb8eb799bb6 --- /dev/null +++ b/arch/sparc/kernel/of_device_common.c @@ -0,0 +1,174 @@ +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> + +#include "of_device_common.h" + +static int node_match(struct device *dev, void *data) +{ + struct of_device *op = to_of_device(dev); + struct device_node *dp = data; + + return (op->node == dp); +} + +struct of_device *of_find_device_by_node(struct device_node *dp) +{ + struct device *dev = bus_find_device(&of_platform_bus_type, NULL, + dp, node_match); + + if (dev) + return to_of_device(dev); + + return NULL; +} +EXPORT_SYMBOL(of_find_device_by_node); + +unsigned int irq_of_parse_and_map(struct device_node *node, int index) +{ + struct of_device *op = of_find_device_by_node(node); + + if (!op || index >= op->num_irqs) + return 0; + + return op->irqs[index]; +} +EXPORT_SYMBOL(irq_of_parse_and_map); + +/* Take the archdata values for IOMMU, STC, and HOSTDATA found in + * BUS and propagate to all child of_device objects. + */ +void of_propagate_archdata(struct of_device *bus) +{ + struct dev_archdata *bus_sd = &bus->dev.archdata; + struct device_node *bus_dp = bus->node; + struct device_node *dp; + + for (dp = bus_dp->child; dp; dp = dp->sibling) { + struct of_device *op = of_find_device_by_node(dp); + + op->dev.archdata.iommu = bus_sd->iommu; + op->dev.archdata.stc = bus_sd->stc; + op->dev.archdata.host_controller = bus_sd->host_controller; + op->dev.archdata.numa_node = bus_sd->numa_node; + + if (dp->child) + of_propagate_archdata(op); + } +} + +struct bus_type of_platform_bus_type; +EXPORT_SYMBOL(of_platform_bus_type); + +static void get_cells(struct device_node *dp, int *addrc, int *sizec) +{ + if (addrc) + *addrc = of_n_addr_cells(dp); + if (sizec) + *sizec = of_n_size_cells(dp); +} + +/* + * Default translator (generic bus) + */ + +void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec) +{ + get_cells(dev, addrc, sizec); +} + +/* Make sure the least significant 64-bits are in-range. Even + * for 3 or 4 cell values it is a good enough approximation. + */ +int of_out_of_range(const u32 *addr, const u32 *base, + const u32 *size, int na, int ns) +{ + u64 a = of_read_addr(addr, na); + u64 b = of_read_addr(base, na); + + if (a < b) + return 1; + + b += of_read_addr(size, ns); + if (a >= b) + return 1; + + return 0; +} + +int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna) +{ + u32 result[OF_MAX_ADDR_CELLS]; + int i; + + if (ns > 2) { + printk("of_device: Cannot handle size cells (%d) > 2.", ns); + return -EINVAL; + } + + if (of_out_of_range(addr, range, range + na + pna, na, ns)) + return -EINVAL; + + /* Start with the parent range base. */ + memcpy(result, range + na, pna * 4); + + /* Add in the child address offset. 
*/ + for (i = 0; i < na; i++) + result[pna - 1 - i] += + (addr[na - 1 - i] - + range[na - 1 - i]); + + memcpy(addr, result, pna * 4); + + return 0; +} + +unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags) +{ + if (flags) + return flags; + return IORESOURCE_MEM; +} + +/* + * SBUS bus specific translator + */ + +int of_bus_sbus_match(struct device_node *np) +{ + struct device_node *dp = np; + + while (dp) { + if (!strcmp(dp->name, "sbus") || + !strcmp(dp->name, "sbi")) + return 1; + + /* Have a look at use_1to1_mapping(). We're trying + * to match SBUS if that's the top-level bus and we + * don't have some intervening real bus that provides + * ranges based translations. + */ + if (of_find_property(dp, "ranges", NULL) != NULL) + break; + + dp = dp->parent; + } + + return 0; +} + +void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec) +{ + if (addrc) + *addrc = 2; + if (sizec) + *sizec = 1; +} diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h new file mode 100644 index 00000000000..cdfd2399284 --- /dev/null +++ b/arch/sparc/kernel/of_device_common.h @@ -0,0 +1,36 @@ +#ifndef _OF_DEVICE_COMMON_H +#define _OF_DEVICE_COMMON_H + +static inline u64 of_read_addr(const u32 *cell, int size) +{ + u64 r = 0; + while (size--) + r = (r << 32) | *(cell++); + return r; +} + +void of_bus_default_count_cells(struct device_node *dev, int *addrc, + int *sizec); +int of_out_of_range(const u32 *addr, const u32 *base, + const u32 *size, int na, int ns); +int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna); +unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags); + +int of_bus_sbus_match(struct device_node *np); +void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec); + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 + +struct of_bus { + const char *name; + const char *addr_prop_name; + int (*match)(struct device_node *parent); + void (*count_cells)(struct device_node *child, + int *addrc, int *sizec); + int (*map)(u32 *addr, const u32 *range, + int na, int ns, int pna); + unsigned long (*get_flags)(const u32 *addr, unsigned long); +}; + +#endif /* _OF_DEVICE_COMMON_H */ diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 5db5ebed35d..2485eaa2310 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, free_pages((unsigned long)cpu, order); } -static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz, - enum dma_data_direction direction) +static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t sz, + enum dma_data_direction direction) { struct iommu *iommu; unsigned long flags, npages, oaddr; @@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz, if (unlikely(direction == DMA_NONE)) goto bad; - oaddr = (unsigned long)ptr; + oaddr = (unsigned long)(page_address(page) + offset); npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; @@ -294,8 +295,8 @@ iommu_map_fail: return DMA_ERROR_CODE; } -static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr, - size_t sz, enum dma_data_direction direction) +static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, + size_t sz, enum dma_data_direction direction) { struct pci_pbm_info 
*pbm; struct iommu *iommu; @@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev, static const struct dma_ops sun4v_dma_ops = { .alloc_coherent = dma_4v_alloc_coherent, .free_coherent = dma_4v_free_coherent, - .map_single = dma_4v_map_single, - .unmap_single = dma_4v_unmap_single, + .map_page = dma_4v_map_page, + .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, .sync_single_for_cpu = dma_4v_sync_single_for_cpu, diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h index bb0f0fda6ca..453397fe5e1 100644 --- a/arch/sparc/kernel/prom.h +++ b/arch/sparc/kernel/prom.h @@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp) extern char *build_path_component(struct device_node *dp); extern void of_console_init(void); -extern void of_fill_in_cpu_data(void); extern unsigned int prom_early_allocated; diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index ca55c7012f7..fb06ac2bd38 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -374,75 +374,26 @@ static const char *get_mid_prop(void) return (tlb_type == spitfire ? "upa-portid" : "portid"); } -struct device_node *of_find_node_by_cpuid(int cpuid) -{ - struct device_node *dp; - const char *mid_prop = get_mid_prop(); - - for_each_node_by_type(dp, "cpu") { - int id = of_getintprop_default(dp, mid_prop, -1); - const char *this_mid_prop = mid_prop; - - if (id < 0) { - this_mid_prop = "cpuid"; - id = of_getintprop_default(dp, this_mid_prop, -1); - } - - if (id < 0) { - prom_printf("OF: Serious problem, cpu lacks " - "%s property", this_mid_prop); - prom_halt(); - } - if (cpuid == id) - return dp; - } - return NULL; -} - -void __init of_fill_in_cpu_data(void) +static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg) { struct device_node *dp; const char *mid_prop; - if (tlb_type == hypervisor) - return; - mid_prop = get_mid_prop(); - ncpus_probed = 0; for_each_node_by_type(dp, "cpu") { int cpuid = of_getintprop_default(dp, mid_prop, -1); const char *this_mid_prop = mid_prop; - struct device_node *portid_parent; - int portid = -1; + void *ret; - portid_parent = NULL; if (cpuid < 0) { this_mid_prop = "cpuid"; cpuid = of_getintprop_default(dp, this_mid_prop, -1); - if (cpuid >= 0) { - int limit = 2; - - portid_parent = dp; - while (limit--) { - portid_parent = portid_parent->parent; - if (!portid_parent) - break; - portid = of_getintprop_default(portid_parent, - "portid", -1); - if (portid >= 0) - break; - } - } } - if (cpuid < 0) { prom_printf("OF: Serious problem, cpu lacks " "%s property", this_mid_prop); prom_halt(); } - - ncpus_probed++; - #ifdef CONFIG_SMP if (cpuid >= NR_CPUS) { printk(KERN_WARNING "Ignoring CPU %d which is " @@ -450,79 +401,142 @@ void __init of_fill_in_cpu_data(void) cpuid, NR_CPUS); continue; } -#else - /* On uniprocessor we only want the values for the - * real physical cpu the kernel booted onto, however - * cpu_data() only has one entry at index 0. 
- */ - if (cpuid != real_hard_smp_processor_id()) - continue; - cpuid = 0; #endif + ret = func(dp, cpuid, arg); + if (ret) + return ret; + } + return NULL; +} - cpu_data(cpuid).clock_tick = - of_getintprop_default(dp, "clock-frequency", 0); - - if (portid_parent) { - cpu_data(cpuid).dcache_size = - of_getintprop_default(dp, "l1-dcache-size", - 16 * 1024); - cpu_data(cpuid).dcache_line_size = - of_getintprop_default(dp, "l1-dcache-line-size", - 32); - cpu_data(cpuid).icache_size = - of_getintprop_default(dp, "l1-icache-size", - 8 * 1024); - cpu_data(cpuid).icache_line_size = - of_getintprop_default(dp, "l1-icache-line-size", - 32); - cpu_data(cpuid).ecache_size = - of_getintprop_default(dp, "l2-cache-size", 0); - cpu_data(cpuid).ecache_line_size = - of_getintprop_default(dp, "l2-cache-line-size", 0); - if (!cpu_data(cpuid).ecache_size || - !cpu_data(cpuid).ecache_line_size) { - cpu_data(cpuid).ecache_size = - of_getintprop_default(portid_parent, - "l2-cache-size", - (4 * 1024 * 1024)); - cpu_data(cpuid).ecache_line_size = - of_getintprop_default(portid_parent, - "l2-cache-line-size", 64); - } - - cpu_data(cpuid).core_id = portid + 1; - cpu_data(cpuid).proc_id = portid; +static void *check_cpu_node(struct device_node *dp, int cpuid, int id) +{ + if (id == cpuid) + return dp; + return NULL; +} + +struct device_node *of_find_node_by_cpuid(int cpuid) +{ + return of_iterate_over_cpus(check_cpu_node, cpuid); +} + +static void *record_one_cpu(struct device_node *dp, int cpuid, int arg) +{ + ncpus_probed++; #ifdef CONFIG_SMP - sparc64_multi_core = 1; + set_cpu_present(cpuid, true); + set_cpu_possible(cpuid, true); #endif - } else { - cpu_data(cpuid).dcache_size = - of_getintprop_default(dp, "dcache-size", 16 * 1024); - cpu_data(cpuid).dcache_line_size = - of_getintprop_default(dp, "dcache-line-size", 32); + return NULL; +} - cpu_data(cpuid).icache_size = - of_getintprop_default(dp, "icache-size", 16 * 1024); - cpu_data(cpuid).icache_line_size = - of_getintprop_default(dp, "icache-line-size", 32); +void __init of_populate_present_mask(void) +{ + if (tlb_type == hypervisor) + return; + + ncpus_probed = 0; + of_iterate_over_cpus(record_one_cpu, 0); +} +static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg) +{ + struct device_node *portid_parent = NULL; + int portid = -1; + + if (of_find_property(dp, "cpuid", NULL)) { + int limit = 2; + + portid_parent = dp; + while (limit--) { + portid_parent = portid_parent->parent; + if (!portid_parent) + break; + portid = of_getintprop_default(portid_parent, + "portid", -1); + if (portid >= 0) + break; + } + } + +#ifndef CONFIG_SMP + /* On uniprocessor we only want the values for the + * real physical cpu the kernel booted onto, however + * cpu_data() only has one entry at index 0. 
+ */ + if (cpuid != real_hard_smp_processor_id()) + return NULL; + cpuid = 0; +#endif + + cpu_data(cpuid).clock_tick = + of_getintprop_default(dp, "clock-frequency", 0); + + if (portid_parent) { + cpu_data(cpuid).dcache_size = + of_getintprop_default(dp, "l1-dcache-size", + 16 * 1024); + cpu_data(cpuid).dcache_line_size = + of_getintprop_default(dp, "l1-dcache-line-size", + 32); + cpu_data(cpuid).icache_size = + of_getintprop_default(dp, "l1-icache-size", + 8 * 1024); + cpu_data(cpuid).icache_line_size = + of_getintprop_default(dp, "l1-icache-line-size", + 32); + cpu_data(cpuid).ecache_size = + of_getintprop_default(dp, "l2-cache-size", 0); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(dp, "l2-cache-line-size", 0); + if (!cpu_data(cpuid).ecache_size || + !cpu_data(cpuid).ecache_line_size) { cpu_data(cpuid).ecache_size = - of_getintprop_default(dp, "ecache-size", + of_getintprop_default(portid_parent, + "l2-cache-size", (4 * 1024 * 1024)); cpu_data(cpuid).ecache_line_size = - of_getintprop_default(dp, "ecache-line-size", 64); - - cpu_data(cpuid).core_id = 0; - cpu_data(cpuid).proc_id = -1; + of_getintprop_default(portid_parent, + "l2-cache-line-size", 64); } + cpu_data(cpuid).core_id = portid + 1; + cpu_data(cpuid).proc_id = portid; #ifdef CONFIG_SMP - set_cpu_present(cpuid, true); - set_cpu_possible(cpuid, true); + sparc64_multi_core = 1; #endif + } else { + cpu_data(cpuid).dcache_size = + of_getintprop_default(dp, "dcache-size", 16 * 1024); + cpu_data(cpuid).dcache_line_size = + of_getintprop_default(dp, "dcache-line-size", 32); + + cpu_data(cpuid).icache_size = + of_getintprop_default(dp, "icache-size", 16 * 1024); + cpu_data(cpuid).icache_line_size = + of_getintprop_default(dp, "icache-line-size", 32); + + cpu_data(cpuid).ecache_size = + of_getintprop_default(dp, "ecache-size", + (4 * 1024 * 1024)); + cpu_data(cpuid).ecache_line_size = + of_getintprop_default(dp, "ecache-line-size", 64); + + cpu_data(cpuid).core_id = 0; + cpu_data(cpuid).proc_id = -1; } + return NULL; +} + +void __init of_fill_in_cpu_data(void) +{ + if (tlb_type == hypervisor) + return; + + of_iterate_over_cpus(fill_in_one_cpu, 0); + smp_fill_in_sib_core_maps(); } diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index ff7b591c894..0fb5789d43c 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -313,6 +313,4 @@ void __init prom_build_devicetree(void) printk("PROM: Built device tree with %u bytes of memory.\n", prom_early_allocated); - - of_fill_in_cpu_data(); } diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index f7642e5a94d..fa44eaf8d89 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -20,7 +20,8 @@ #include <linux/cache.h> #include <linux/jiffies.h> #include <linux/profile.h> -#include <linux/lmb.h> +#include <linux/bootmem.h> +#include <linux/vmalloc.h> #include <linux/cpu.h> #include <asm/head.h> @@ -47,6 +48,8 @@ #include <asm/ldc.h> #include <asm/hypervisor.h> +#include "cpumap.h" + int sparc64_multi_core __read_mostly; DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; @@ -278,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p) return kern_base + (val - KERNBASE); } -static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg) +static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) { extern unsigned long sparc64_ttable_tl0; extern unsigned long kern_locked_tte_data; @@ -298,12 +301,12 @@ static void 
__cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread "hvtramp_descr.\n"); return; } + *descrp = hdesc; hdesc->cpu = cpu; hdesc->num_mappings = num_kernel_image_mappings; tb = &trap_block[cpu]; - tb->hdesc = hdesc; hdesc->fault_info_va = (unsigned long) &tb->fault_info; hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); @@ -341,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL; static int __cpuinit smp_boot_one_cpu(unsigned int cpu) { - struct trap_per_cpu *tb = &trap_block[cpu]; unsigned long entry = (unsigned long)(&sparc64_cpu_startup); unsigned long cookie = (unsigned long)(&cpu_new_thread); struct task_struct *p; + void *descr = NULL; int timeout, ret; p = fork_idle(cpu); @@ -359,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu) #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) if (ldom_domaining_enabled) ldom_startcpu_cpuid(cpu, - (unsigned long) cpu_new_thread); + (unsigned long) cpu_new_thread, + &descr); else #endif prom_startcpu_cpuid(cpu, entry, cookie); @@ -383,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu) } cpu_new_thread = NULL; - if (tb->hdesc) { - kfree(tb->hdesc); - tb->hdesc = NULL; - } + kfree(descr); return ret; } @@ -1315,6 +1316,8 @@ int __cpu_disable(void) cpu_clear(cpu, cpu_online_map); ipi_call_unlock(); + cpu_map_rebuild(); + return 0; } @@ -1373,36 +1376,171 @@ void smp_send_stop(void) { } -unsigned long __per_cpu_base __read_mostly; -unsigned long __per_cpu_shift __read_mostly; +/** + * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu + * @cpu: cpu to allocate for + * @size: size allocation in bytes + * @align: alignment + * + * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper + * does the right thing for NUMA regardless of the current + * configuration. + * + * RETURNS: + * Pointer to the allocated area on success, NULL on failure. 
+ */ +static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, + unsigned long align) +{ + const unsigned long goal = __pa(MAX_DMA_ADDRESS); +#ifdef CONFIG_NEED_MULTIPLE_NODES + int node = cpu_to_node(cpu); + void *ptr; + + if (!node_online(node) || !NODE_DATA(node)) { + ptr = __alloc_bootmem(size, align, goal); + pr_info("cpu %d has no node %d or node-local memory\n", + cpu, node); + pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", + cpu, size, __pa(ptr)); + } else { + ptr = __alloc_bootmem_node(NODE_DATA(node), + size, align, goal); + pr_debug("per cpu data for cpu%d %lu bytes on node%d at " + "%016lx\n", cpu, size, node, __pa(ptr)); + } + return ptr; +#else + return __alloc_bootmem(size, align, goal); +#endif +} -EXPORT_SYMBOL(__per_cpu_base); -EXPORT_SYMBOL(__per_cpu_shift); +static size_t pcpur_size __initdata; +static void **pcpur_ptrs __initdata; -void __init real_setup_per_cpu_areas(void) +static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) { - unsigned long paddr, goal, size, i; - char *ptr; + size_t off = (size_t)pageno << PAGE_SHIFT; - /* Copy section for each CPU (we discard the original) */ - goal = PERCPU_ENOUGH_ROOM; + if (off >= pcpur_size) + return NULL; - __per_cpu_shift = PAGE_SHIFT; - for (size = PAGE_SIZE; size < goal; size <<= 1UL) - __per_cpu_shift++; + return virt_to_page(pcpur_ptrs[cpu] + off); +} + +#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL) + +static void __init pcpu_map_range(unsigned long start, unsigned long end, + struct page *page) +{ + unsigned long pfn = page_to_pfn(page); + unsigned long pte_base; + + BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL)); + + pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | + _PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + if (tlb_type == hypervisor) + pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | + _PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + while (start < end) { + pgd_t *pgd = pgd_offset_k(start); + unsigned long this_end; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pud = pud_offset(pgd, start); + if (pud_none(*pud)) { + pmd_t *new; + + new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + pud_populate(&init_mm, pud, new); + } + + pmd = pmd_offset(pud, start); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(&init_mm, pmd, new); + } - paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE); - if (!paddr) { - prom_printf("Cannot allocate per-cpu memory.\n"); - prom_halt(); + pte = pte_offset_kernel(pmd, start); + this_end = (start + PMD_SIZE) & PMD_MASK; + if (this_end > end) + this_end = end; + + while (start < this_end) { + unsigned long paddr = pfn << PAGE_SHIFT; + + pte_val(*pte) = (paddr | pte_base); + + start += PAGE_SIZE; + pte++; + pfn++; + } + } +} + +void __init setup_per_cpu_areas(void) +{ + size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start; + static struct vm_struct vm; + unsigned long delta, cpu; + size_t pcpu_unit_size; + size_t ptrs_size; + + pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE + + PERCPU_DYNAMIC_RESERVE); + dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE; + + + ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); + pcpur_ptrs = alloc_bootmem(ptrs_size); + + for_each_possible_cpu(cpu) { + pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE, + PCPU_CHUNK_SIZE); + + free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), + PCPU_CHUNK_SIZE - pcpur_size); + + memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); } 
- ptr = __va(paddr); - __per_cpu_base = ptr - __per_cpu_start; + /* allocate address and map */ + vm.flags = VM_ALLOC; + vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE; + vm_area_register_early(&vm, PCPU_CHUNK_SIZE); + + for_each_possible_cpu(cpu) { + unsigned long start = (unsigned long) vm.addr; + unsigned long end; + + start += cpu * PCPU_CHUNK_SIZE; + end = start + PCPU_CHUNK_SIZE; + pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu])); + } - for (i = 0; i < NR_CPUS; i++, ptr += size) - memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); + pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size, + PERCPU_MODULE_RESERVE, dyn_size, + PCPU_CHUNK_SIZE, vm.addr, NULL); + + free_bootmem(__pa(pcpur_ptrs), ptrs_size); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) { + __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; + } /* Setup %g5 for the boot cpu. */ __local_per_cpu_offset = __per_cpu_offset(smp_processor_id()); + + of_fill_in_cpu_data(); + if (tlb_type == hypervisor) + mdesc_fill_in_cpu_data(cpu_all_mask); } diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 00ec3b15f38..69090165729 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -81,4 +81,6 @@ sys_call_table: /*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 -/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev +/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv +/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo + diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 82b5bf85b9d..6b3ee88e253 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -82,7 +82,8 @@ sys_call_table32: .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 -/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev +/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv + .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo #endif /* CONFIG_COMPAT */ @@ -156,4 +157,5 @@ sys_call_table: .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 -/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev +/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv + .word sys_pwritev, sys_rt_tgsigqueueinfo diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index d809c4ebb48..10f7bb9fc14 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2509,6 +2509,7 @@ void do_getpsr(struct pt_regs *regs) } struct trap_per_cpu trap_block[NR_CPUS]; +EXPORT_SYMBOL(trap_block); /* This can get invoked before 
sched_init() so play it super safe * and use hard_smp_processor_id(). @@ -2530,84 +2531,97 @@ extern void tsb_config_offsets_are_bolixed_dave(void); void __init trap_init(void) { /* Compile time sanity check. */ - if (TI_TASK != offsetof(struct thread_info, task) || - TI_FLAGS != offsetof(struct thread_info, flags) || - TI_CPU != offsetof(struct thread_info, cpu) || - TI_FPSAVED != offsetof(struct thread_info, fpsaved) || - TI_KSP != offsetof(struct thread_info, ksp) || - TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) || - TI_KREGS != offsetof(struct thread_info, kregs) || - TI_UTRAPS != offsetof(struct thread_info, utraps) || - TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) || - TI_REG_WINDOW != offsetof(struct thread_info, reg_window) || - TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) || - TI_GSR != offsetof(struct thread_info, gsr) || - TI_XFSR != offsetof(struct thread_info, xfsr) || - TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) || - TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) || - TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) || - TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) || - TI_PCR != offsetof(struct thread_info, pcr_reg) || - TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || - TI_NEW_CHILD != offsetof(struct thread_info, new_child) || - TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || - TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) || - TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || - TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || - TI_FPREGS != offsetof(struct thread_info, fpregs) || - (TI_FPREGS & (64 - 1))) - thread_info_offsets_are_bolixed_dave(); - - if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) || - (TRAP_PER_CPU_PGD_PADDR != - offsetof(struct trap_per_cpu, pgd_paddr)) || - (TRAP_PER_CPU_CPU_MONDO_PA != - offsetof(struct trap_per_cpu, cpu_mondo_pa)) || - (TRAP_PER_CPU_DEV_MONDO_PA != - offsetof(struct trap_per_cpu, dev_mondo_pa)) || - (TRAP_PER_CPU_RESUM_MONDO_PA != - offsetof(struct trap_per_cpu, resum_mondo_pa)) || - (TRAP_PER_CPU_RESUM_KBUF_PA != - offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || - (TRAP_PER_CPU_NONRESUM_MONDO_PA != - offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || - (TRAP_PER_CPU_NONRESUM_KBUF_PA != - offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || - (TRAP_PER_CPU_FAULT_INFO != - offsetof(struct trap_per_cpu, fault_info)) || - (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != - offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || - (TRAP_PER_CPU_CPU_LIST_PA != - offsetof(struct trap_per_cpu, cpu_list_pa)) || - (TRAP_PER_CPU_TSB_HUGE != - offsetof(struct trap_per_cpu, tsb_huge)) || - (TRAP_PER_CPU_TSB_HUGE_TEMP != - offsetof(struct trap_per_cpu, tsb_huge_temp)) || - (TRAP_PER_CPU_IRQ_WORKLIST_PA != - offsetof(struct trap_per_cpu, irq_worklist_pa)) || - (TRAP_PER_CPU_CPU_MONDO_QMASK != - offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || - (TRAP_PER_CPU_DEV_MONDO_QMASK != - offsetof(struct trap_per_cpu, dev_mondo_qmask)) || - (TRAP_PER_CPU_RESUM_QMASK != - offsetof(struct trap_per_cpu, resum_qmask)) || - (TRAP_PER_CPU_NONRESUM_QMASK != - offsetof(struct trap_per_cpu, nonresum_qmask))) - trap_per_cpu_offsets_are_bolixed_dave(); - - if ((TSB_CONFIG_TSB != - offsetof(struct tsb_config, tsb)) || - (TSB_CONFIG_RSS_LIMIT != - offsetof(struct tsb_config, tsb_rss_limit)) || - (TSB_CONFIG_NENTRIES != - offsetof(struct tsb_config, tsb_nentries)) || - 
(TSB_CONFIG_REG_VAL != - offsetof(struct tsb_config, tsb_reg_val)) || - (TSB_CONFIG_MAP_VADDR != - offsetof(struct tsb_config, tsb_map_vaddr)) || - (TSB_CONFIG_MAP_PTE != - offsetof(struct tsb_config, tsb_map_pte))) - tsb_config_offsets_are_bolixed_dave(); + BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) || + TI_FLAGS != offsetof(struct thread_info, flags) || + TI_CPU != offsetof(struct thread_info, cpu) || + TI_FPSAVED != offsetof(struct thread_info, fpsaved) || + TI_KSP != offsetof(struct thread_info, ksp) || + TI_FAULT_ADDR != offsetof(struct thread_info, + fault_address) || + TI_KREGS != offsetof(struct thread_info, kregs) || + TI_UTRAPS != offsetof(struct thread_info, utraps) || + TI_EXEC_DOMAIN != offsetof(struct thread_info, + exec_domain) || + TI_REG_WINDOW != offsetof(struct thread_info, + reg_window) || + TI_RWIN_SPTRS != offsetof(struct thread_info, + rwbuf_stkptrs) || + TI_GSR != offsetof(struct thread_info, gsr) || + TI_XFSR != offsetof(struct thread_info, xfsr) || + TI_USER_CNTD0 != offsetof(struct thread_info, + user_cntd0) || + TI_USER_CNTD1 != offsetof(struct thread_info, + user_cntd1) || + TI_KERN_CNTD0 != offsetof(struct thread_info, + kernel_cntd0) || + TI_KERN_CNTD1 != offsetof(struct thread_info, + kernel_cntd1) || + TI_PCR != offsetof(struct thread_info, pcr_reg) || + TI_PRE_COUNT != offsetof(struct thread_info, + preempt_count) || + TI_NEW_CHILD != offsetof(struct thread_info, new_child) || + TI_SYS_NOERROR != offsetof(struct thread_info, + syscall_noerror) || + TI_RESTART_BLOCK != offsetof(struct thread_info, + restart_block) || + TI_KUNA_REGS != offsetof(struct thread_info, + kern_una_regs) || + TI_KUNA_INSN != offsetof(struct thread_info, + kern_una_insn) || + TI_FPREGS != offsetof(struct thread_info, fpregs) || + (TI_FPREGS & (64 - 1))); + + BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, + thread) || + (TRAP_PER_CPU_PGD_PADDR != + offsetof(struct trap_per_cpu, pgd_paddr)) || + (TRAP_PER_CPU_CPU_MONDO_PA != + offsetof(struct trap_per_cpu, cpu_mondo_pa)) || + (TRAP_PER_CPU_DEV_MONDO_PA != + offsetof(struct trap_per_cpu, dev_mondo_pa)) || + (TRAP_PER_CPU_RESUM_MONDO_PA != + offsetof(struct trap_per_cpu, resum_mondo_pa)) || + (TRAP_PER_CPU_RESUM_KBUF_PA != + offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) || + (TRAP_PER_CPU_NONRESUM_MONDO_PA != + offsetof(struct trap_per_cpu, nonresum_mondo_pa)) || + (TRAP_PER_CPU_NONRESUM_KBUF_PA != + offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) || + (TRAP_PER_CPU_FAULT_INFO != + offsetof(struct trap_per_cpu, fault_info)) || + (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA != + offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) || + (TRAP_PER_CPU_CPU_LIST_PA != + offsetof(struct trap_per_cpu, cpu_list_pa)) || + (TRAP_PER_CPU_TSB_HUGE != + offsetof(struct trap_per_cpu, tsb_huge)) || + (TRAP_PER_CPU_TSB_HUGE_TEMP != + offsetof(struct trap_per_cpu, tsb_huge_temp)) || + (TRAP_PER_CPU_IRQ_WORKLIST_PA != + offsetof(struct trap_per_cpu, irq_worklist_pa)) || + (TRAP_PER_CPU_CPU_MONDO_QMASK != + offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || + (TRAP_PER_CPU_DEV_MONDO_QMASK != + offsetof(struct trap_per_cpu, dev_mondo_qmask)) || + (TRAP_PER_CPU_RESUM_QMASK != + offsetof(struct trap_per_cpu, resum_qmask)) || + (TRAP_PER_CPU_NONRESUM_QMASK != + offsetof(struct trap_per_cpu, nonresum_qmask)) || + (TRAP_PER_CPU_PER_CPU_BASE != + offsetof(struct trap_per_cpu, __per_cpu_base))); + + BUILD_BUG_ON((TSB_CONFIG_TSB != + offsetof(struct tsb_config, tsb)) || + (TSB_CONFIG_RSS_LIMIT != + offsetof(struct tsb_config, 
tsb_rss_limit)) || + (TSB_CONFIG_NENTRIES != + offsetof(struct tsb_config, tsb_nentries)) || + (TSB_CONFIG_REG_VAL != + offsetof(struct tsb_config, tsb_reg_val)) || + (TSB_CONFIG_MAP_VADDR != + offsetof(struct tsb_config, tsb_map_vaddr)) || + (TSB_CONFIG_MAP_PTE != + offsetof(struct tsb_config, tsb_map_pte))); /* Attach to the address space of init_task. On SMP we * do this in smp.c:smp_callin for other cpus. diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index cbb282dab5a..26bb3919ff1 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -358,6 +358,7 @@ void __init paging_init(void) protection_map[15] = PAGE_SHARED; btfixup(); prom_build_devicetree(); + of_fill_in_cpu_data(); device_scan(); } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f26a352c08a..ca92e2f54e4 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1679,11 +1679,6 @@ pgd_t swapper_pg_dir[2048]; static void sun4u_pgprot_init(void); static void sun4v_pgprot_init(void); -/* Dummy function */ -void __init setup_per_cpu_areas(void) -{ -} - void __init paging_init(void) { unsigned long end_pfn, shift, phys_base; @@ -1799,16 +1794,13 @@ void __init paging_init(void) if (tlb_type == hypervisor) sun4v_ktsb_register(); - /* We must setup the per-cpu areas before we pull in the - * PROM and the MDESC. The code there fills in cpu and - * other information into per-cpu data structures. - */ - real_setup_per_cpu_areas(); - prom_build_devicetree(); + of_populate_present_mask(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_mdesc_init(); + mdesc_populate_present_mask(cpu_all_mask); + } /* Once the OF device tree and MDESC have been setup, we know * the list of possible cpus. Therefore we can allocate the diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 06c9a7d9820..ade4eb373bd 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -19,6 +19,7 @@ #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kdebug.h> +#include <linux/log2.h> #include <asm/bitext.h> #include <asm/page.h> @@ -349,7 +350,7 @@ static void srmmu_free_nocache(unsigned long vaddr, int size) vaddr, srmmu_nocache_end); BUG(); } - if (size & (size-1)) { + if (!is_power_of_2(size)) { printk("Size 0x%x is not a power of 2\n", size); BUG(); } diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 434ba121e3c..3b44b47c7e1 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -360,7 +360,7 @@ static struct platform_driver uml_net_driver = { static void net_device_release(struct device *dev) { - struct uml_net *device = dev->driver_data; + struct uml_net *device = dev_get_drvdata(dev); struct net_device *netdev = device->dev; struct uml_net_private *lp = netdev_priv(netdev); @@ -440,7 +440,7 @@ static void eth_configure(int n, void *init, char *mac, device->pdev.id = n; device->pdev.name = DRIVER_NAME; device->pdev.dev.release = net_device_release; - device->pdev.dev.driver_data = device; + dev_set_drvdata(&device->pdev.dev, device); if (platform_device_register(&device->pdev)) goto out_free_netdev; SET_NETDEV_DEV(dev,&device->pdev.dev); diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index aa9e926e13d..8f05d4d9da1 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -778,7 +778,7 @@ static int ubd_open_dev(struct ubd *ubd_dev) static void ubd_device_release(struct device *dev) { - struct ubd *ubd_dev = dev->driver_data; + struct ubd *ubd_dev = 
dev_get_drvdata(dev); blk_cleanup_queue(ubd_dev->queue); *ubd_dev = ((struct ubd) DEFAULT_UBD); @@ -807,7 +807,7 @@ static int ubd_disk_register(int major, u64 size, int unit, ubd_devs[unit].pdev.id = unit; ubd_devs[unit].pdev.name = DRIVER_NAME; ubd_devs[unit].pdev.dev.release = ubd_device_release; - ubd_devs[unit].pdev.dev.driver_data = &ubd_devs[unit]; + dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]); platform_device_register(&ubd_devs[unit].pdev); disk->driverfs_dev = &ubd_devs[unit].pdev.dev; } diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h index 37dd097c16c..b3906f860a8 100644 --- a/arch/um/include/shared/init.h +++ b/arch/um/include/shared/init.h @@ -27,7 +27,7 @@ * sign followed by value, e.g.: * * static int init_variable __initdata = 0; - * static char linux_logo[] __initdata = { 0x32, 0x36, ... }; + * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; * * Don't forget to initialize data not at file scope, i.e. within a function, * as gcc otherwise puts the data into the bss section and not into the init diff --git a/arch/um/include/shared/net_user.h b/arch/um/include/shared/net_user.h index 63bee158cd8..3dabbe128e4 100644 --- a/arch/um/include/shared/net_user.h +++ b/arch/um/include/shared/net_user.h @@ -8,7 +8,7 @@ #define ETH_ADDR_LEN (6) #define ETH_HEADER_ETHERTAP (16) -#define ETH_HEADER_OTHER (14) +#define ETH_HEADER_OTHER (26) /* 14 for ethernet + VLAN + MPLS for crazy people */ #define ETH_MAX_PACKET (1500) #define UML_NET_VERSION (4) diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c index 806d381947b..b25121b537d 100644 --- a/arch/um/kernel/init_task.c +++ b/arch/um/kernel/init_task.c @@ -10,11 +10,8 @@ #include "linux/mqueue.h" #include "asm/uaccess.h" -struct mm_struct init_mm = INIT_MM(init_mm); static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -EXPORT_SYMBOL(init_mm); - /* * Initial task structure. * diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 336b6156907..454cdb43e35 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -358,7 +358,7 @@ EXPORT_SYMBOL(um_request_irq); EXPORT_SYMBOL(reactivate_fd); /* - * hw_interrupt_type must define (startup || enable) && + * irq_chip must define (startup || enable) && * (shutdown || disable) && end */ static void dummy(unsigned int irq) @@ -366,7 +366,7 @@ static void dummy(unsigned int irq) } /* This is used for everything else than the timer. 
*/ -static struct hw_interrupt_type normal_irq_type = { +static struct irq_chip normal_irq_type = { .typename = "SIGIO", .release = free_irq_by_irq_and_dev, .disable = dummy, @@ -375,7 +375,7 @@ static struct hw_interrupt_type normal_irq_type = { .end = dummy }; -static struct hw_interrupt_type SIGVTALRM_irq_type = { +static struct irq_chip SIGVTALRM_irq_type = { .typename = "SIGVTALRM", .release = free_irq_by_irq_and_dev, .shutdown = dummy, /* never called */ diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S index c41b04bf5fa..54a36ec20cb 100644 --- a/arch/um/sys-i386/stub.S +++ b/arch/um/sys-i386/stub.S @@ -1,7 +1,7 @@ #include "as-layout.h" .globl syscall_stub -.section .__syscall_stub, "x" +.section .__syscall_stub, "ax" .globl batch_syscall_stub batch_syscall_stub: diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h index 6e8a9195e95..04b9e87c8da 100644 --- a/arch/um/sys-x86_64/asm/elf.h +++ b/arch/um/sys-x86_64/asm/elf.h @@ -66,28 +66,28 @@ typedef struct user_i387_struct elf_fpregset_t; PT_REGS_R15(regs) = 0; \ } while (0) -#define ELF_CORE_COPY_REGS(pr_reg, regs) \ - (pr_reg)[0] = (regs)->regs.gp[0]; \ - (pr_reg)[1] = (regs)->regs.gp[1]; \ - (pr_reg)[2] = (regs)->regs.gp[2]; \ - (pr_reg)[3] = (regs)->regs.gp[3]; \ - (pr_reg)[4] = (regs)->regs.gp[4]; \ - (pr_reg)[5] = (regs)->regs.gp[5]; \ - (pr_reg)[6] = (regs)->regs.gp[6]; \ - (pr_reg)[7] = (regs)->regs.gp[7]; \ - (pr_reg)[8] = (regs)->regs.gp[8]; \ - (pr_reg)[9] = (regs)->regs.gp[9]; \ - (pr_reg)[10] = (regs)->regs.gp[10]; \ - (pr_reg)[11] = (regs)->regs.gp[11]; \ - (pr_reg)[12] = (regs)->regs.gp[12]; \ - (pr_reg)[13] = (regs)->regs.gp[13]; \ - (pr_reg)[14] = (regs)->regs.gp[14]; \ - (pr_reg)[15] = (regs)->regs.gp[15]; \ - (pr_reg)[16] = (regs)->regs.gp[16]; \ - (pr_reg)[17] = (regs)->regs.gp[17]; \ - (pr_reg)[18] = (regs)->regs.gp[18]; \ - (pr_reg)[19] = (regs)->regs.gp[19]; \ - (pr_reg)[20] = (regs)->regs.gp[20]; \ +#define ELF_CORE_COPY_REGS(pr_reg, _regs) \ + (pr_reg)[0] = (_regs)->regs.gp[0]; \ + (pr_reg)[1] = (_regs)->regs.gp[1]; \ + (pr_reg)[2] = (_regs)->regs.gp[2]; \ + (pr_reg)[3] = (_regs)->regs.gp[3]; \ + (pr_reg)[4] = (_regs)->regs.gp[4]; \ + (pr_reg)[5] = (_regs)->regs.gp[5]; \ + (pr_reg)[6] = (_regs)->regs.gp[6]; \ + (pr_reg)[7] = (_regs)->regs.gp[7]; \ + (pr_reg)[8] = (_regs)->regs.gp[8]; \ + (pr_reg)[9] = (_regs)->regs.gp[9]; \ + (pr_reg)[10] = (_regs)->regs.gp[10]; \ + (pr_reg)[11] = (_regs)->regs.gp[11]; \ + (pr_reg)[12] = (_regs)->regs.gp[12]; \ + (pr_reg)[13] = (_regs)->regs.gp[13]; \ + (pr_reg)[14] = (_regs)->regs.gp[14]; \ + (pr_reg)[15] = (_regs)->regs.gp[15]; \ + (pr_reg)[16] = (_regs)->regs.gp[16]; \ + (pr_reg)[17] = (_regs)->regs.gp[17]; \ + (pr_reg)[18] = (_regs)->regs.gp[18]; \ + (pr_reg)[19] = (_regs)->regs.gp[19]; \ + (pr_reg)[20] = (_regs)->regs.gp[20]; \ (pr_reg)[21] = current->thread.arch.fs; \ (pr_reg)[22] = 0; \ (pr_reg)[23] = 0; \ diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S index 6d9edf9fabc..20e4a96a6dc 100644 --- a/arch/um/sys-x86_64/stub.S +++ b/arch/um/sys-x86_64/stub.S @@ -1,7 +1,7 @@ #include "as-layout.h" .globl syscall_stub -.section .__syscall_stub, "x" +.section .__syscall_stub, "ax" syscall_stub: syscall /* We don't have 64-bit constants, so this constructs the address diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 356d2ec8e2f..cf42fc30541 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -46,6 +46,7 @@ config X86 select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA + select HAVE_ARCH_KMEMCHECK 
config OUTPUT_FORMAT string diff --git a/arch/x86/Makefile b/arch/x86/Makefile index edbd0ca6206..1b68659c41b 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR endif endif +# Don't unroll struct assignments with kmemcheck enabled +ifeq ($(CONFIG_KMEMCHECK),y) + KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) +endif + # Stackpointer is addressed different for 32 bit and 64 bit x86 sp-$(CONFIG_X86_32) := esp sp-$(CONFIG_X86_64) := rsp diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index f82fdc412c6..b93405b228b 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -6,6 +6,7 @@ * Documentation/DMA-API.txt for documentation. */ +#include <linux/kmemcheck.h> #include <linux/scatterlist.h> #include <linux/dma-debug.h> #include <linux/dma-attrs.h> @@ -60,6 +61,7 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size, dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); + kmemcheck_mark_initialized(ptr, size); addr = ops->map_page(hwdev, virt_to_page(ptr), (unsigned long)ptr & ~PAGE_MASK, size, dir, NULL); @@ -87,8 +89,12 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg, { struct dma_map_ops *ops = get_dma_ops(hwdev); int ents; + struct scatterlist *s; + int i; BUG_ON(!valid_dma_direction(dir)); + for_each_sg(sg, s, nents, i) + kmemcheck_mark_initialized(sg_virt(s), s->length); ents = ops->map_sg(hwdev, sg, nents, dir, NULL); debug_dma_map_sg(hwdev, sg, nents, ents, dir); @@ -200,6 +206,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); + kmemcheck_mark_initialized(page_address(page) + offset, size); addr = ops->map_page(dev, page, offset, size, dir, NULL); debug_dma_map_page(dev, page, offset, size, dir, addr, false); diff --git a/arch/x86/include/asm/kmap_types.h b/arch/x86/include/asm/kmap_types.h index 5759c165a5c..9e00a731a7f 100644 --- a/arch/x86/include/asm/kmap_types.h +++ b/arch/x86/include/asm/kmap_types.h @@ -2,28 +2,11 @@ #define _ASM_X86_KMAP_TYPES_H #if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM) -# define D(n) __KM_FENCE_##n , -#else -# define D(n) +#define __WITH_KM_FENCE #endif -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> -#undef D +#undef __WITH_KM_FENCE #endif /* _ASM_X86_KMAP_TYPES_H */ diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h new file mode 100644 index 00000000000..ed01518f297 --- /dev/null +++ b/arch/x86/include/asm/kmemcheck.h @@ -0,0 +1,42 @@ +#ifndef ASM_X86_KMEMCHECK_H +#define ASM_X86_KMEMCHECK_H + +#include <linux/types.h> +#include <asm/ptrace.h> + +#ifdef CONFIG_KMEMCHECK +bool kmemcheck_active(struct pt_regs *regs); + +void kmemcheck_show(struct pt_regs *regs); +void kmemcheck_hide(struct pt_regs *regs); + +bool kmemcheck_fault(struct pt_regs *regs, + unsigned long address, unsigned long error_code); +bool kmemcheck_trap(struct pt_regs *regs); +#else +static inline bool kmemcheck_active(struct pt_regs *regs) +{ + return false; +} + +static inline void kmemcheck_show(struct pt_regs *regs) +{ +} + +static inline void kmemcheck_hide(struct pt_regs *regs) +{ +} + +static inline bool kmemcheck_fault(struct 
pt_regs *regs, + unsigned long address, unsigned long error_code) +{ + return false; +} + +static inline bool kmemcheck_trap(struct pt_regs *regs) +{ + return false; +} +#endif /* CONFIG_KMEMCHECK */ + +#endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 18ef7ebf263..3cc06e3fceb 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -317,6 +317,11 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } +static inline int pte_hidden(pte_t pte) +{ + return pte_flags(pte) & _PAGE_HIDDEN; +} + static inline int pmd_present(pmd_t pmd) { return pmd_flags(pmd) & _PAGE_PRESENT; diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 4d258ad76a0..54cb697f490 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -18,7 +18,7 @@ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ -#define _PAGE_BIT_UNUSED3 11 +#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ #define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 @@ -41,13 +41,18 @@ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) #define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) -#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) #define __HAVE_ARCH_PTE_SPECIAL +#ifdef CONFIG_KMEMCHECK +#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) +#else +#define _PAGE_HIDDEN (_AT(pteval_t, 0)) +#endif + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) #else diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 0e0e3ba827f..c86f452256d 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) * No 3D Now! */ +#ifndef CONFIG_KMEMCHECK #define memcpy(t, f, n) \ (__builtin_constant_p((n)) \ ? __constant_memcpy((t), (f), (n)) \ : __memcpy((t), (f), (n))) +#else +/* + * kmemcheck becomes very happy if we use the REP instructions unconditionally, + * because it means that we know both memory operands in advance. + */ +#define memcpy(t, f, n) __memcpy((t), (f), (n)) +#endif #endif diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 2afe164bf1e..19e2c468fc2 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t function. 
*/ #define __HAVE_ARCH_MEMCPY 1 +#ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 extern void *memcpy(void *to, const void *from, size_t len); #else @@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len); __ret; \ }) #endif +#else +/* + * kmemcheck becomes very happy if we use the REP instructions unconditionally, + * because it means that we know both memory operands in advance. + */ +#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) +#endif #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 602c769fc98..b0783520988 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -154,9 +154,9 @@ struct thread_info { /* thread information allocation */ #ifdef CONFIG_DEBUG_STACK_USAGE -#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO) +#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) #else -#define THREAD_FLAGS GFP_KERNEL +#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK) #endif #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h index b5c9d45c981..1375cfc9396 100644 --- a/arch/x86/include/asm/timex.h +++ b/arch/x86/include/asm/timex.h @@ -4,9 +4,7 @@ #include <asm/processor.h> #include <asm/tsc.h> -/* The PIT ticks at this frequency (in HZ): */ -#define PIT_TICK_RATE 1193182 - +/* Assume we use the PIT time source for the clock tick */ #define CLOCK_TICK_RATE PIT_TICK_RATE #define ARCH_HAS_READ_CURRENT_TIMER diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h index 11b3bb86e17..7fcf6f3dbcc 100644 --- a/arch/x86/include/asm/xor.h +++ b/arch/x86/include/asm/xor.h @@ -1,5 +1,10 @@ +#ifdef CONFIG_KMEMCHECK +/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ +# include <asm-generic/xor.h> +#else #ifdef CONFIG_X86_32 # include "xor_32.h" #else # include "xor_64.h" #endif +#endif diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index ef0ae207a7c..096d19aea2f 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -463,7 +463,7 @@ static void uv_heartbeat(unsigned long ignored) uv_set_scir_bits(bits); /* enable next timer period */ - mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL); + mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); } static void __cpuinit uv_heartbeat_enable(int cpu) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3ffdcfa9abd..9fa33886c0d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -487,7 +487,6 @@ out: static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; - static int printed; int i; for (i = 0; i < X86_VENDOR_NUM; i++) { @@ -504,13 +503,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) } } - if (!printed) { - printed++; - printk(KERN_ERR - "CPU: vendor_id '%s' unknown, using generic init.\n", v); - - printk(KERN_ERR "CPU: Your system may be unstable.\n"); - } + printk_once(KERN_ERR + "CPU: vendor_id '%s' unknown, using generic init.\n" \ + "CPU: Your system may be unstable.\n", v); c->x86_vendor = X86_VENDOR_UNKNOWN; this_cpu = &default_cpu; diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index cf52215d9eb..81cbe64ed6b 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1,3 
+1,4 @@ + /* * (c) 2003-2006 Advanced Micro Devices, Inc. * Your use of this code is subject to the terms and conditions of the @@ -117,20 +118,17 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) u32 i = 0; if (cpu_family == CPU_HW_PSTATE) { - if (data->currpstate == HW_PSTATE_INVALID) { - /* read (initial) hw pstate if not yet set */ - rdmsr(MSR_PSTATE_STATUS, lo, hi); - i = lo & HW_PSTATE_MASK; - - /* - * a workaround for family 11h erratum 311 might cause - * an "out-of-range Pstate if the core is in Pstate-0 - */ - if (i >= data->numps) - data->currpstate = HW_PSTATE_0; - else - data->currpstate = i; - } + rdmsr(MSR_PSTATE_STATUS, lo, hi); + i = lo & HW_PSTATE_MASK; + data->currpstate = i; + + /* + * a workaround for family 11h erratum 311 might cause + * an "out-of-range Pstate if the core is in Pstate-0 + */ + if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps)) + data->currpstate = HW_PSTATE_0; + return 0; } do { @@ -510,41 +508,34 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, return 0; } -static int check_supported_cpu(unsigned int cpu) +static void check_supported_cpu(void *_rc) { - cpumask_t oldmask; u32 eax, ebx, ecx, edx; - unsigned int rc = 0; - - oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); + int *rc = _rc; - if (smp_processor_id() != cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); - goto out; - } + *rc = -ENODEV; if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) - goto out; + return; eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) && ((eax & CPUID_XFAM) < CPUID_XFAM_10H)) - goto out; + return; if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax); - goto out; + return; } eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { printk(KERN_INFO PFX "No frequency change capabilities detected\n"); - goto out; + return; } cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); @@ -552,21 +543,17 @@ static int check_supported_cpu(unsigned int cpu) != P_STATE_TRANSITION_CAPABLE) { printk(KERN_INFO PFX "Power state transitions not supported\n"); - goto out; + return; } } else { /* must be a HW Pstate capable processor */ cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) cpu_family = CPU_HW_PSTATE; else - goto out; + return; } - rc = 1; - -out: - set_cpus_allowed_ptr(current, &oldmask); - return rc; + *rc = 0; } static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, @@ -823,13 +810,14 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) return; - control = data->acpi_data.states[index].control; data->irt = (control - >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >> - RVO_SHIFT) & RVO_MASK; data->exttype = (control - >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; - data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1 - << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable = - (control >> VST_SHIFT) & VST_MASK; } + control = data->acpi_data.states[index].control; + data->irt = (control >> IRT_SHIFT) & IRT_MASK; + data->rvo = (control >> RVO_SHIFT) & RVO_MASK; + data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; + data->plllock = (control >> 
PLL_L_SHIFT) & PLL_L_MASK; + data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK); + data->vstable = (control >> VST_SHIFT) & VST_MASK; +} static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { @@ -1046,6 +1034,19 @@ static int get_transition_latency(struct powernow_k8_data *data) if (cur_latency > max_latency) max_latency = cur_latency; } + if (max_latency == 0) { + /* + * Fam 11h always returns 0 as transition latency. + * This is intended and means "very fast". While cpufreq core + * and governors currently can handle that gracefully, better + * set it to 1 to avoid problems in the future. + * For all others it's a BIOS bug. + */ + if (boot_cpu_data.x86 != 0x11) + printk(KERN_ERR FW_WARN PFX "Invalid zero transition " + "latency\n"); + max_latency = 1; + } /* value in usecs, needs to be in nanoseconds */ return 1000 * max_latency; } @@ -1093,7 +1094,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, freqs.old = find_khz_freq_from_fid(data->currfid); freqs.new = find_khz_freq_from_fid(fid); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -1101,7 +1102,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, res = transition_fid_vid(data, fid, vid); freqs.new = find_khz_freq_from_fid(data->currfid); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -1126,7 +1127,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, data->currpstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } @@ -1134,7 +1135,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, res = transition_pstate(data, pstate); freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); - for_each_cpu_mask_nr(i, *(data->available_cores)) { + for_each_cpu(i, data->available_cores) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } @@ -1235,21 +1236,47 @@ static int powernowk8_verify(struct cpufreq_policy *pol) return cpufreq_frequency_table_verify(pol, data->powernow_table); } -static const char ACPI_PSS_BIOS_BUG_MSG[] = - KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" - KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n"; +struct init_on_cpu { + struct powernow_k8_data *data; + int rc; +}; + +static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) +{ + struct init_on_cpu *init_on_cpu = _init_on_cpu; + + if (pending_bit_stuck()) { + printk(KERN_ERR PFX "failing init, change pending bit set\n"); + init_on_cpu->rc = -ENODEV; + return; + } + + if (query_current_values_with_pending_wait(init_on_cpu->data)) { + init_on_cpu->rc = -ENODEV; + return; + } + + if (cpu_family == CPU_OPTERON) + fidvid_msr_init(); + + init_on_cpu->rc = 0; +} /* per CPU init entry point to the driver */ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) { + static const char ACPI_PSS_BIOS_BUG_MSG[] = + KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" + KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n"; struct powernow_k8_data *data; - cpumask_t oldmask; + struct init_on_cpu init_on_cpu; int rc; if (!cpu_online(pol->cpu)) return -ENODEV; -
if (!check_supported_cpu(pol->cpu)) + smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); + if (rc) return -ENODEV; data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); @@ -1289,27 +1316,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) pol->cpuinfo.transition_latency = get_transition_latency(data); /* only run on specific CPU from here on */ - oldmask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); - - if (smp_processor_id() != pol->cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out_unmask; - } - - if (pending_bit_stuck()) { - printk(KERN_ERR PFX "failing init, change pending bit set\n"); - goto err_out_unmask; - } - - if (query_current_values_with_pending_wait(data)) - goto err_out_unmask; - - if (cpu_family == CPU_OPTERON) - fidvid_msr_init(); - - /* run on any CPU again */ - set_cpus_allowed_ptr(current, &oldmask); + init_on_cpu.data = data; + smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu, + &init_on_cpu, 1); + rc = init_on_cpu.rc; + if (rc != 0) + goto err_out_exit_acpi; if (cpu_family == CPU_HW_PSTATE) cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); @@ -1346,8 +1358,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) return 0; -err_out_unmask: - set_cpus_allowed_ptr(current, &oldmask); +err_out_exit_acpi: powernow_k8_cpu_exit_acpi(data); err_out: @@ -1372,28 +1383,25 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) return 0; } +static void query_values_on_cpu(void *_err) +{ + int *err = _err; + struct powernow_k8_data *data = __get_cpu_var(powernow_data); + + *err = query_current_values_with_pending_wait(data); +} + static unsigned int powernowk8_get(unsigned int cpu) { - struct powernow_k8_data *data; - cpumask_t oldmask = current->cpus_allowed; + struct powernow_k8_data *data = per_cpu(powernow_data, cpu); unsigned int khz = 0; - unsigned int first; - - first = cpumask_first(cpu_core_mask(cpu)); - data = per_cpu(powernow_data, first); + int err; if (!data) return -EINVAL; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (smp_processor_id() != cpu) { - printk(KERN_ERR PFX - "limiting to CPU %d failed in powernowk8_get\n", cpu); - set_cpus_allowed_ptr(current, &oldmask); - return 0; - } - - if (query_current_values_with_pending_wait(data)) + smp_call_function_single(cpu, query_values_on_cpu, &err, true); + if (err) goto out; if (cpu_family == CPU_HW_PSTATE) @@ -1404,7 +1412,6 @@ static unsigned int powernowk8_get(unsigned int cpu) out: - set_cpus_allowed_ptr(current, &oldmask); return khz; } @@ -1430,7 +1437,9 @@ static int __cpuinit powernowk8_init(void) unsigned int i, supported_cpus = 0; for_each_online_cpu(i) { - if (check_supported_cpu(i)) + int rc; + smp_call_function_single(i, check_supported_cpu, &rc, 1); + if (rc == 0) supported_cpus++; } diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index 6c6698feade..c9c1190b5e1 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h @@ -223,14 +223,3 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); - -#ifdef CONFIG_SMP -static inline void define_siblings(int cpu, cpumask_t 
cpu_sharedcore_mask[]) -{ -} -#else -static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[]) -{ - cpu_set(0, cpu_sharedcore_mask[0]); -} -#endif diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index 55c831ed71c..8d672ef162c 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c @@ -323,14 +323,8 @@ static unsigned int get_cur_freq(unsigned int cpu) { unsigned l, h; unsigned clock_freq; - cpumask_t saved_mask; - saved_mask = current->cpus_allowed; - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); - if (smp_processor_id() != cpu) - return 0; - - rdmsr(MSR_IA32_PERF_STATUS, l, h); + rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h); clock_freq = extract_clock(l, cpu, 0); if (unlikely(clock_freq == 0)) { @@ -340,11 +334,9 @@ static unsigned int get_cur_freq(unsigned int cpu) * P-state transition (like TM2). Get the last freq set * in PERF_CTL. */ - rdmsr(MSR_IA32_PERF_CTL, l, h); + rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h); clock_freq = extract_clock(l, cpu, 1); } - - set_cpus_allowed_ptr(current, &saved_mask); return clock_freq; } @@ -467,15 +459,10 @@ static int centrino_target (struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int retval = 0; unsigned int j, k, first_cpu, tmp; - cpumask_var_t saved_mask, covered_cpus; + cpumask_var_t covered_cpus; - if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL))) - return -ENOMEM; - if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) { - free_cpumask_var(saved_mask); + if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) return -ENOMEM; - } - cpumask_copy(saved_mask, ¤t->cpus_allowed); if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { retval = -ENODEV; @@ -493,7 +480,7 @@ static int centrino_target (struct cpufreq_policy *policy, first_cpu = 1; for_each_cpu(j, policy->cpus) { - const struct cpumask *mask; + int good_cpu; /* cpufreq holds the hotplug lock, so we are safe here */ if (!cpu_online(j)) @@ -504,32 +491,30 @@ static int centrino_target (struct cpufreq_policy *policy, * Make sure we are running on CPU that wants to change freq */ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) - mask = policy->cpus; + good_cpu = cpumask_any_and(policy->cpus, + cpu_online_mask); else - mask = cpumask_of(j); + good_cpu = j; - set_cpus_allowed_ptr(current, mask); - preempt_disable(); - if (unlikely(!cpu_isset(smp_processor_id(), *mask))) { + if (good_cpu >= nr_cpu_ids) { dprintk("couldn't limit to CPUs in this domain\n"); retval = -EAGAIN; if (first_cpu) { /* We haven't started the transition yet. 
*/ - goto migrate_end; + goto out; } - preempt_enable(); break; } msr = per_cpu(centrino_model, cpu)->op_points[newstate].index; if (first_cpu) { - rdmsr(MSR_IA32_PERF_CTL, oldmsr, h); + rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); if (msr == (oldmsr & 0xffff)) { dprintk("no change needed - msr was and needs " "to be %x\n", oldmsr); retval = 0; - goto migrate_end; + goto out; } freqs.old = extract_clock(oldmsr, cpu, 0); @@ -553,14 +538,11 @@ static int centrino_target (struct cpufreq_policy *policy, oldmsr |= msr; } - wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); - if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { - preempt_enable(); + wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h); + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) break; - } - cpu_set(j, *covered_cpus); - preempt_enable(); + cpumask_set_cpu(j, covered_cpus); } for_each_cpu(k, policy->cpus) { @@ -578,10 +560,8 @@ static int centrino_target (struct cpufreq_policy *policy, * Best effort undo.. */ - for_each_cpu_mask_nr(j, *covered_cpus) { - set_cpus_allowed_ptr(current, &cpumask_of_cpu(j)); - wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); - } + for_each_cpu(j, covered_cpus) + wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); tmp = freqs.new; freqs.new = freqs.old; @@ -593,15 +573,9 @@ static int centrino_target (struct cpufreq_policy *policy, cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } } - set_cpus_allowed_ptr(current, saved_mask); retval = 0; - goto out; -migrate_end: - preempt_enable(); - set_cpus_allowed_ptr(current, saved_mask); out: - free_cpumask_var(saved_mask); free_cpumask_var(covered_cpus); return retval; } diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 016c1a4fa3f..6911e91fb4f 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c @@ -89,7 +89,8 @@ static int speedstep_find_register(void) * speedstep_set_state - set the SpeedStep state * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * - * Tries to change the SpeedStep state. + * Tries to change the SpeedStep state. Can be called from + * smp_call_function_single. */ static void speedstep_set_state(unsigned int state) { @@ -143,6 +144,11 @@ static void speedstep_set_state(unsigned int state) return; } +/* Wrapper for smp_call_function_single. */ +static void _speedstep_set_state(void *_state) +{ + speedstep_set_state(*(unsigned int *)_state); +} /** * speedstep_activate - activate SpeedStep control in the chipset @@ -226,22 +232,28 @@ static unsigned int speedstep_detect_chipset(void) return 0; } -static unsigned int _speedstep_get(const struct cpumask *cpus) -{ +struct get_freq_data { unsigned int speed; - cpumask_t cpus_allowed; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpus); - speed = speedstep_get_frequency(speedstep_processor); - set_cpus_allowed_ptr(current, &cpus_allowed); - dprintk("detected %u kHz as current frequency\n", speed); - return speed; + unsigned int processor; +}; + +static void get_freq_data(void *_data) +{ + struct get_freq_data *data = _data; + + data->speed = speedstep_get_frequency(data->processor); } static unsigned int speedstep_get(unsigned int cpu) { - return _speedstep_get(cpumask_of(cpu)); + struct get_freq_data data = { .processor = cpu }; + + /* You're supposed to ensure CPU is online. 
*/ + if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0) + BUG(); + + dprintk("detected %u kHz as current frequency\n", data.speed); + return data.speed; } /** @@ -257,16 +269,16 @@ static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - unsigned int newstate = 0; + unsigned int newstate = 0, policy_cpu; struct cpufreq_freqs freqs; - cpumask_t cpus_allowed; int i; if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; - freqs.old = _speedstep_get(policy->cpus); + policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); + freqs.old = speedstep_get(policy_cpu); freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; @@ -276,20 +288,13 @@ static int speedstep_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - cpus_allowed = current->cpus_allowed; - for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } - /* switch to physical CPU where state is to be changed */ - set_cpus_allowed_ptr(current, policy->cpus); - - speedstep_set_state(newstate); - - /* allow to be run on all CPUs */ - set_cpus_allowed_ptr(current, &cpus_allowed); + smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, + true); for_each_cpu(i, policy->cpus) { freqs.cpu = i; @@ -312,33 +317,43 @@ static int speedstep_verify(struct cpufreq_policy *policy) return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } +struct get_freqs { + struct cpufreq_policy *policy; + int ret; +}; + +static void get_freqs_on_cpu(void *_get_freqs) +{ + struct get_freqs *get_freqs = _get_freqs; + + get_freqs->ret = + speedstep_get_freqs(speedstep_processor, + &speedstep_freqs[SPEEDSTEP_LOW].frequency, + &speedstep_freqs[SPEEDSTEP_HIGH].frequency, + &get_freqs->policy->cpuinfo.transition_latency, + &speedstep_set_state); +} static int speedstep_cpu_init(struct cpufreq_policy *policy) { - int result = 0; - unsigned int speed; - cpumask_t cpus_allowed; + int result; + unsigned int policy_cpu, speed; + struct get_freqs gf; /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, policy->cpus); + policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); /* detect low and high frequency and transition latency */ - result = speedstep_get_freqs(speedstep_processor, - &speedstep_freqs[SPEEDSTEP_LOW].frequency, - &speedstep_freqs[SPEEDSTEP_HIGH].frequency, - &policy->cpuinfo.transition_latency, - &speedstep_set_state); - set_cpus_allowed_ptr(current, &cpus_allowed); - if (result) - return result; + gf.policy = policy; + smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1); + if (gf.ret) + return gf.ret; /* get current speed setting */ - speed = _speedstep_get(policy->cpus); + speed = speedstep_get(policy_cpu); if (!speed) return -EIO; diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c index 2e3c6862657..f4c290b8482 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c @@ -226,6 +226,7 @@ static unsigned int pentium4_get_frequency(void) } +/* Warning: may get called from smp_call_function_single. 
*/ unsigned int speedstep_get_frequency(unsigned int processor) { switch (processor) { diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index daed39ba261..3260ab04499 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) */ if (c->x86 == 6 && c->x86_model < 15) clear_cpu_cap(c, X86_FEATURE_PAT); + +#ifdef CONFIG_KMEMCHECK + /* + * P4s have a "fast strings" feature which causes single- + * stepping REP instructions to only generate a #DB on + * cache-line boundaries. + * + * Ingo Molnar reported a Pentium D (model 6) and a Xeon + * (model 2) with the same problem. + */ + if (c->x86 == 15) { + u64 misc_enable; + + rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + + if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) { + printk(KERN_INFO "kmemcheck: Disabling fast string operations\n"); + + misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING; + wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); + } + } +#endif } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 2ac1f0c2beb..b07af886124 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -182,6 +182,11 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier = .notifier_call = cpuid_class_cpu_callback, }; +static char *cpuid_nodename(struct device *dev) +{ + return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt)); +} + static int __init cpuid_init(void) { int i, err = 0; @@ -198,6 +203,7 @@ static int __init cpuid_init(void) err = PTR_ERR(cpuid_class); goto out_chrdev; } + cpuid_class->nodename = cpuid_nodename; for_each_online_cpu(i) { err = cpuid_device_create(i); if (err != 0) diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index c2e0bb0890d..5cf36c053ac 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -7,6 +7,7 @@ #include <linux/spinlock.h> #include <linux/jiffies.h> #include <linux/module.h> +#include <linux/timex.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index df3bf269bea..270ff83efc1 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c @@ -12,7 +12,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); /* * Initial thread structure. 
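The nodename hooks added here for the cpuid class (and for msr and microcode further below) hand the driver core a path relative to /dev, so udev/devtmpfs can create the per-CPU nodes as /dev/cpu/<n>/cpuid and /dev/cpu/<n>/msr rather than flat device names. A rough user-space sketch of reading a CPUID leaf through such a node, assuming the /dev/cpu/<n>/cpuid layout and the cpuid driver's convention that the file offset selects the leaf and each read returns the four 32-bit registers:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t regs[4];	/* eax, ebx, ecx, edx */
	int fd = open("/dev/cpu/0/cpuid", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/cpuid");
		return 1;
	}
	/* Offset 0 selects CPUID leaf 0; reads must be a multiple of 16 bytes. */
	if (pread(fd, regs, sizeof(regs), 0) != (ssize_t) sizeof(regs)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("max basic cpuid leaf: %u\n", regs[0]);
	close(fd);
	return 0;
}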
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 9c4461501fc..9371448290a 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -236,6 +236,7 @@ static const struct file_operations microcode_fops = { static struct miscdevice microcode_dev = { .minor = MICROCODE_MINOR, .name = "microcode", + .devnode = "cpu/microcode", .fops = µcode_fops, }; diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 3cf3413ec62..98fd6cd4e3a 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -196,6 +196,11 @@ static struct notifier_block __refdata msr_class_cpu_notifier = { .notifier_call = msr_class_cpu_callback, }; +static char *msr_nodename(struct device *dev) +{ + return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); +} + static int __init msr_init(void) { int i, err = 0; @@ -212,6 +217,7 @@ static int __init msr_init(void) err = PTR_ERR(msr_class); goto out_chrdev; } + msr_class->nodename = msr_nodename; for_each_online_cpu(i) { err = msr_device_create(i); if (err != 0) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 3bb2be1649b..994dd6a4a2a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -63,7 +63,7 @@ void arch_task_cache_init(void) task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC, NULL); + SLAB_PANIC | SLAB_NOTRACK, NULL); } /* diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 4aaf7e48394..c3eb207181f 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -77,6 +77,13 @@ void save_stack_trace(struct stack_trace *trace) } EXPORT_SYMBOL_GPL(save_stack_trace); +void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp) +{ + dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1e1e27b7d43..5f935f0d586 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -45,6 +45,7 @@ #include <linux/edac.h> #endif +#include <asm/kmemcheck.h> #include <asm/stacktrace.h> #include <asm/processor.h> #include <asm/debugreg.h> @@ -534,6 +535,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) get_debugreg(condition, 6); + /* Catch kmemcheck conditions first of all! */ + if (condition & DR_STEP && kmemcheck_trap(regs)) + return; + /* * The processor cleared BTF, so don't mark that we need it set. 
*/ diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 3e1c057e98f..b0597ad02c9 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -9,6 +9,7 @@ #include <linux/delay.h> #include <linux/clocksource.h> #include <linux/percpu.h> +#include <linux/timex.h> #include <asm/hpet.h> #include <asm/timer.h> @@ -631,17 +632,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; - unsigned long *lpj, dummy; + unsigned long *lpj; if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC)) return 0; - lpj = &dummy; - if (!(freq->flags & CPUFREQ_CONST_LOOPS)) + lpj = &boot_cpu_data.loops_per_jiffy; #ifdef CONFIG_SMP + if (!(freq->flags & CPUFREQ_CONST_LOOPS)) lpj = &cpu_data(freq->cpu).loops_per_jiffy; -#else - lpj = &boot_cpu_data.loops_per_jiffy; #endif if (!ref_freq) { diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 32d6ae8fb60..e770bf349ec 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1277,7 +1277,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu) struct page *pages; struct vmcs *vmcs; - pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order); + pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); if (!pages) return NULL; vmcs = page_address(pages); diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index fdd30d08ab5..eefdeee8a87 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o +obj-$(CONFIG_KMEMCHECK) += kmemcheck/ + obj-$(CONFIG_MMIOTRACE) += mmiotrace.o mmiotrace-y := kmmio.o pf_in.o mmio-mod.o obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c6acc632637..baa0e86adfb 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -14,6 +14,7 @@ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ +#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ /* * Page fault error code bits: @@ -956,6 +957,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) /* Get the faulting address: */ address = read_cr2(); + /* + * Detect and handle instructions that would cause a page fault for + * both a tracked kernel page and a userspace page. + */ + if (kmemcheck_active(regs)) + kmemcheck_hide(regs); + if (unlikely(kmmio_fault(regs, address))) return; @@ -973,9 +981,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) * protection error (error_code & 9) == 0. */ if (unlikely(fault_in_kernel_space(address))) { - if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && - vmalloc_fault(address) >= 0) - return; + if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { + if (vmalloc_fault(address) >= 0) + return; + + if (kmemcheck_fault(regs, address, error_code)) + return; + } /* Can handle a stale RO->RW TLB: */ if (spurious_fault(error_code, address)) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 34c1bfb64f1..f53b57e4086 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -213,7 +213,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, if (!after_bootmem) init_gbpages(); -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) /* * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages. 
* This will simplify cpa(), which otherwise needs to support splitting diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 9ff3c0816d1..3cd7711bb94 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -111,7 +111,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) pte_t *page_table = NULL; if (after_bootmem) { -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); #endif if (!page_table) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 52bb9519bb8..9c543290a81 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -104,7 +104,7 @@ static __ref void *spp_getpage(void) void *ptr; if (after_bootmem) - ptr = (void *) get_zeroed_page(GFP_ATOMIC); + ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); else ptr = alloc_bootmem_pages(PAGE_SIZE); @@ -281,7 +281,7 @@ static __ref void *alloc_low_page(unsigned long *phys) void *adr; if (after_bootmem) { - adr = (void *)get_zeroed_page(GFP_ATOMIC); + adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); *phys = __pa(adr); return adr; diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile new file mode 100644 index 00000000000..520b3bce409 --- /dev/null +++ b/arch/x86/mm/kmemcheck/Makefile @@ -0,0 +1 @@ +obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c new file mode 100644 index 00000000000..4901d0dafda --- /dev/null +++ b/arch/x86/mm/kmemcheck/error.c @@ -0,0 +1,228 @@ +#include <linux/interrupt.h> +#include <linux/kdebug.h> +#include <linux/kmemcheck.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/stacktrace.h> +#include <linux/string.h> + +#include "error.h" +#include "shadow.h" + +enum kmemcheck_error_type { + KMEMCHECK_ERROR_INVALID_ACCESS, + KMEMCHECK_ERROR_BUG, +}; + +#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT) + +struct kmemcheck_error { + enum kmemcheck_error_type type; + + union { + /* KMEMCHECK_ERROR_INVALID_ACCESS */ + struct { + /* Kind of access that caused the error */ + enum kmemcheck_shadow state; + /* Address and size of the erroneous read */ + unsigned long address; + unsigned int size; + }; + }; + + struct pt_regs regs; + struct stack_trace trace; + unsigned long trace_entries[32]; + + /* We compress it to a char. */ + unsigned char shadow_copy[SHADOW_COPY_SIZE]; + unsigned char memory_copy[SHADOW_COPY_SIZE]; +}; + +/* + * Create a ring queue of errors to output. We can't call printk() directly + * from the kmemcheck traps, since this may call the console drivers and + * result in a recursive fault. 
+ */ +static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE]; +static unsigned int error_count; +static unsigned int error_rd; +static unsigned int error_wr; +static unsigned int error_missed_count; + +static struct kmemcheck_error *error_next_wr(void) +{ + struct kmemcheck_error *e; + + if (error_count == ARRAY_SIZE(error_fifo)) { + ++error_missed_count; + return NULL; + } + + e = &error_fifo[error_wr]; + if (++error_wr == ARRAY_SIZE(error_fifo)) + error_wr = 0; + ++error_count; + return e; +} + +static struct kmemcheck_error *error_next_rd(void) +{ + struct kmemcheck_error *e; + + if (error_count == 0) + return NULL; + + e = &error_fifo[error_rd]; + if (++error_rd == ARRAY_SIZE(error_fifo)) + error_rd = 0; + --error_count; + return e; +} + +void kmemcheck_error_recall(void) +{ + static const char *desc[] = { + [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated", + [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized", + [KMEMCHECK_SHADOW_INITIALIZED] = "initialized", + [KMEMCHECK_SHADOW_FREED] = "freed", + }; + + static const char short_desc[] = { + [KMEMCHECK_SHADOW_UNALLOCATED] = 'a', + [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u', + [KMEMCHECK_SHADOW_INITIALIZED] = 'i', + [KMEMCHECK_SHADOW_FREED] = 'f', + }; + + struct kmemcheck_error *e; + unsigned int i; + + e = error_next_rd(); + if (!e) + return; + + switch (e->type) { + case KMEMCHECK_ERROR_INVALID_ACCESS: + printk(KERN_ERR "WARNING: kmemcheck: Caught %d-bit read " + "from %s memory (%p)\n", + 8 * e->size, e->state < ARRAY_SIZE(desc) ? + desc[e->state] : "(invalid shadow state)", + (void *) e->address); + + printk(KERN_INFO); + for (i = 0; i < SHADOW_COPY_SIZE; ++i) + printk("%02x", e->memory_copy[i]); + printk("\n"); + + printk(KERN_INFO); + for (i = 0; i < SHADOW_COPY_SIZE; ++i) { + if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) + printk(" %c", short_desc[e->shadow_copy[i]]); + else + printk(" ?"); + } + printk("\n"); + printk(KERN_INFO "%*c\n", 2 + 2 + * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); + break; + case KMEMCHECK_ERROR_BUG: + printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n"); + break; + } + + __show_regs(&e->regs, 1); + print_stack_trace(&e->trace, 0); +} + +static void do_wakeup(unsigned long data) +{ + while (error_count > 0) + kmemcheck_error_recall(); + + if (error_missed_count > 0) { + printk(KERN_WARNING "kmemcheck: Lost %d error reports because " + "the queue was too small\n", error_missed_count); + error_missed_count = 0; + } +} + +static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0); + +/* + * Save the context of an error report. + */ +void kmemcheck_error_save(enum kmemcheck_shadow state, + unsigned long address, unsigned int size, struct pt_regs *regs) +{ + static unsigned long prev_ip; + + struct kmemcheck_error *e; + void *shadow_copy; + void *memory_copy; + + /* Don't report several adjacent errors from the same EIP. 
*/ + if (regs->ip == prev_ip) + return; + prev_ip = regs->ip; + + e = error_next_wr(); + if (!e) + return; + + e->type = KMEMCHECK_ERROR_INVALID_ACCESS; + + e->state = state; + e->address = address; + e->size = size; + + /* Save regs */ + memcpy(&e->regs, regs, sizeof(*regs)); + + /* Save stack trace */ + e->trace.nr_entries = 0; + e->trace.entries = e->trace_entries; + e->trace.max_entries = ARRAY_SIZE(e->trace_entries); + e->trace.skip = 0; + save_stack_trace_bp(&e->trace, regs->bp); + + /* Round address down to nearest 16 bytes */ + shadow_copy = kmemcheck_shadow_lookup(address + & ~(SHADOW_COPY_SIZE - 1)); + BUG_ON(!shadow_copy); + + memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); + + kmemcheck_show_addr(address); + memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1)); + memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE); + kmemcheck_hide_addr(address); + + tasklet_hi_schedule_first(&kmemcheck_tasklet); +} + +/* + * Save the context of a kmemcheck bug. + */ +void kmemcheck_error_save_bug(struct pt_regs *regs) +{ + struct kmemcheck_error *e; + + e = error_next_wr(); + if (!e) + return; + + e->type = KMEMCHECK_ERROR_BUG; + + memcpy(&e->regs, regs, sizeof(*regs)); + + e->trace.nr_entries = 0; + e->trace.entries = e->trace_entries; + e->trace.max_entries = ARRAY_SIZE(e->trace_entries); + e->trace.skip = 1; + save_stack_trace(&e->trace); + + tasklet_hi_schedule_first(&kmemcheck_tasklet); +} diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h new file mode 100644 index 00000000000..0efc2e8d0a2 --- /dev/null +++ b/arch/x86/mm/kmemcheck/error.h @@ -0,0 +1,15 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H +#define ARCH__X86__MM__KMEMCHECK__ERROR_H + +#include <linux/ptrace.h> + +#include "shadow.h" + +void kmemcheck_error_save(enum kmemcheck_shadow state, + unsigned long address, unsigned int size, struct pt_regs *regs); + +void kmemcheck_error_save_bug(struct pt_regs *regs); + +void kmemcheck_error_recall(void); + +#endif diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c new file mode 100644 index 00000000000..2c55ed09865 --- /dev/null +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -0,0 +1,640 @@ +/** + * kmemcheck - a heavyweight memory checker for the linux kernel + * Copyright (C) 2007, 2008 Vegard Nossum <vegardno@ifi.uio.no> + * (With a lot of help from Ingo Molnar and Pekka Enberg.) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kallsyms.h> +#include <linux/kernel.h> +#include <linux/kmemcheck.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/page-flags.h> +#include <linux/percpu.h> +#include <linux/ptrace.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <asm/cacheflush.h> +#include <asm/kmemcheck.h> +#include <asm/pgtable.h> +#include <asm/tlbflush.h> + +#include "error.h" +#include "opcode.h" +#include "pte.h" +#include "selftest.h" +#include "shadow.h" + + +#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT +# define KMEMCHECK_ENABLED 0 +#endif + +#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT +# define KMEMCHECK_ENABLED 1 +#endif + +#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT +# define KMEMCHECK_ENABLED 2 +#endif + +int kmemcheck_enabled = KMEMCHECK_ENABLED; + +int __init kmemcheck_init(void) +{ +#ifdef CONFIG_SMP + /* + * Limit SMP to use a single CPU. 
We rely on the fact that this code + * runs before SMP is set up. + */ + if (setup_max_cpus > 1) { + printk(KERN_INFO + "kmemcheck: Limiting number of CPUs to 1.\n"); + setup_max_cpus = 1; + } +#endif + + if (!kmemcheck_selftest()) { + printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n"); + kmemcheck_enabled = 0; + return -EINVAL; + } + + printk(KERN_INFO "kmemcheck: Initialized\n"); + return 0; +} + +early_initcall(kmemcheck_init); + +/* + * We need to parse the kmemcheck= option before any memory is allocated. + */ +static int __init param_kmemcheck(char *str) +{ + if (!str) + return -EINVAL; + + sscanf(str, "%d", &kmemcheck_enabled); + return 0; +} + +early_param("kmemcheck", param_kmemcheck); + +int kmemcheck_show_addr(unsigned long address) +{ + pte_t *pte; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return 0; + + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + __flush_tlb_one(address); + return 1; +} + +int kmemcheck_hide_addr(unsigned long address) +{ + pte_t *pte; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return 0; + + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + __flush_tlb_one(address); + return 1; +} + +struct kmemcheck_context { + bool busy; + int balance; + + /* + * There can be at most two memory operands to an instruction, but + * each address can cross a page boundary -- so we may need up to + * four addresses that must be hidden/revealed for each fault. + */ + unsigned long addr[4]; + unsigned long n_addrs; + unsigned long flags; + + /* Data size of the instruction that caused a fault. */ + unsigned int size; +}; + +static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); + +bool kmemcheck_active(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + return data->balance > 0; +} + +/* Save an address that needs to be shown/hidden */ +static void kmemcheck_save_addr(unsigned long addr) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); + data->addr[data->n_addrs++] = addr; +} + +static unsigned int kmemcheck_show_all(void) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned int i; + unsigned int n; + + n = 0; + for (i = 0; i < data->n_addrs; ++i) + n += kmemcheck_show_addr(data->addr[i]); + + return n; +} + +static unsigned int kmemcheck_hide_all(void) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned int i; + unsigned int n; + + n = 0; + for (i = 0; i < data->n_addrs; ++i) + n += kmemcheck_hide_addr(data->addr[i]); + + return n; +} + +/* + * Called from the #PF handler. + */ +void kmemcheck_show(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + BUG_ON(!irqs_disabled()); + + if (unlikely(data->balance != 0)) { + kmemcheck_show_all(); + kmemcheck_error_save_bug(regs); + data->balance = 0; + return; + } + + /* + * None of the addresses actually belonged to kmemcheck. Note that + * this is not an error. + */ + if (kmemcheck_show_all() == 0) + return; + + ++data->balance; + + /* + * The IF needs to be cleared as well, so that the faulting + * instruction can run "uninterrupted". Otherwise, we might take + * an interrupt and start executing that before we've had a chance + * to hide the page again. 
+ * + * NOTE: In the rare case of multiple faults, we must not override + * the original flags: + */ + if (!(regs->flags & X86_EFLAGS_TF)) + data->flags = regs->flags; + + regs->flags |= X86_EFLAGS_TF; + regs->flags &= ~X86_EFLAGS_IF; +} + +/* + * Called from the #DB handler. + */ +void kmemcheck_hide(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + int n; + + BUG_ON(!irqs_disabled()); + + if (data->balance == 0) + return; + + if (unlikely(data->balance != 1)) { + kmemcheck_show_all(); + kmemcheck_error_save_bug(regs); + data->n_addrs = 0; + data->balance = 0; + + if (!(data->flags & X86_EFLAGS_TF)) + regs->flags &= ~X86_EFLAGS_TF; + if (data->flags & X86_EFLAGS_IF) + regs->flags |= X86_EFLAGS_IF; + return; + } + + if (kmemcheck_enabled) + n = kmemcheck_hide_all(); + else + n = kmemcheck_show_all(); + + if (n == 0) + return; + + --data->balance; + + data->n_addrs = 0; + + if (!(data->flags & X86_EFLAGS_TF)) + regs->flags &= ~X86_EFLAGS_TF; + if (data->flags & X86_EFLAGS_IF) + regs->flags |= X86_EFLAGS_IF; +} + +void kmemcheck_show_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) { + unsigned long address; + pte_t *pte; + unsigned int level; + + address = (unsigned long) page_address(&p[i]); + pte = lookup_address(address, &level); + BUG_ON(!pte); + BUG_ON(level != PG_LEVEL_4K); + + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); + __flush_tlb_one(address); + } +} + +bool kmemcheck_page_is_tracked(struct page *p) +{ + /* This will also check the "hidden" flag of the PTE. */ + return kmemcheck_pte_lookup((unsigned long) page_address(p)); +} + +void kmemcheck_hide_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) { + unsigned long address; + pte_t *pte; + unsigned int level; + + address = (unsigned long) page_address(&p[i]); + pte = lookup_address(address, &level); + BUG_ON(!pte); + BUG_ON(level != PG_LEVEL_4K); + + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); + __flush_tlb_one(address); + } +} + +/* Access may NOT cross page boundary */ +static void kmemcheck_read_strict(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + void *shadow; + enum kmemcheck_shadow status; + + shadow = kmemcheck_shadow_lookup(addr); + if (!shadow) + return; + + kmemcheck_save_addr(addr); + status = kmemcheck_shadow_test(shadow, size); + if (status == KMEMCHECK_SHADOW_INITIALIZED) + return; + + if (kmemcheck_enabled) + kmemcheck_error_save(status, addr, size, regs); + + if (kmemcheck_enabled == 2) + kmemcheck_enabled = 0; + + /* Don't warn about it again. */ + kmemcheck_shadow_set(shadow, size); +} + +/* Access may cross page boundary */ +static void kmemcheck_read(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + unsigned long page = addr & PAGE_MASK; + unsigned long next_addr = addr + size - 1; + unsigned long next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + kmemcheck_read_strict(regs, addr, size); + return; + } + + /* + * What we do is basically to split the access across the + * two pages and handle each part separately. Yes, this means + * that we may now see reads that are 3 + 5 bytes, for + * example (and if both are uninitialized, there will be two + * reports), but it makes the code a lot simpler. 
+ */ + kmemcheck_read_strict(regs, addr, next_page - addr); + kmemcheck_read_strict(regs, next_page, next_addr - next_page); +} + +static void kmemcheck_write_strict(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + void *shadow; + + shadow = kmemcheck_shadow_lookup(addr); + if (!shadow) + return; + + kmemcheck_save_addr(addr); + kmemcheck_shadow_set(shadow, size); +} + +static void kmemcheck_write(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + unsigned long page = addr & PAGE_MASK; + unsigned long next_addr = addr + size - 1; + unsigned long next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + kmemcheck_write_strict(regs, addr, size); + return; + } + + /* See comment in kmemcheck_read(). */ + kmemcheck_write_strict(regs, addr, next_page - addr); + kmemcheck_write_strict(regs, next_page, next_addr - next_page); +} + +/* + * Copying is hard. We have two addresses, each of which may be split across + * a page (and each page will have different shadow addresses). + */ +static void kmemcheck_copy(struct pt_regs *regs, + unsigned long src_addr, unsigned long dst_addr, unsigned int size) +{ + uint8_t shadow[8]; + enum kmemcheck_shadow status; + + unsigned long page; + unsigned long next_addr; + unsigned long next_page; + + uint8_t *x; + unsigned int i; + unsigned int n; + + BUG_ON(size > sizeof(shadow)); + + page = src_addr & PAGE_MASK; + next_addr = src_addr + size - 1; + next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + /* Same page */ + x = kmemcheck_shadow_lookup(src_addr); + if (x) { + kmemcheck_save_addr(src_addr); + for (i = 0; i < size; ++i) + shadow[i] = x[i]; + } else { + for (i = 0; i < size; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } else { + n = next_page - src_addr; + BUG_ON(n > sizeof(shadow)); + + /* First page */ + x = kmemcheck_shadow_lookup(src_addr); + if (x) { + kmemcheck_save_addr(src_addr); + for (i = 0; i < n; ++i) + shadow[i] = x[i]; + } else { + /* Not tracked */ + for (i = 0; i < n; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + + /* Second page */ + x = kmemcheck_shadow_lookup(next_page); + if (x) { + kmemcheck_save_addr(next_page); + for (i = n; i < size; ++i) + shadow[i] = x[i - n]; + } else { + /* Not tracked */ + for (i = n; i < size; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + + page = dst_addr & PAGE_MASK; + next_addr = dst_addr + size - 1; + next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + /* Same page */ + x = kmemcheck_shadow_lookup(dst_addr); + if (x) { + kmemcheck_save_addr(dst_addr); + for (i = 0; i < size; ++i) { + x[i] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + } else { + n = next_page - dst_addr; + BUG_ON(n > sizeof(shadow)); + + /* First page */ + x = kmemcheck_shadow_lookup(dst_addr); + if (x) { + kmemcheck_save_addr(dst_addr); + for (i = 0; i < n; ++i) { + x[i] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + + /* Second page */ + x = kmemcheck_shadow_lookup(next_page); + if (x) { + kmemcheck_save_addr(next_page); + for (i = n; i < size; ++i) { + x[i - n] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + } + + status = kmemcheck_shadow_test(shadow, size); + if (status == KMEMCHECK_SHADOW_INITIALIZED) + return; + + if (kmemcheck_enabled) + kmemcheck_error_save(status, src_addr, size, regs); + + if (kmemcheck_enabled == 2) + kmemcheck_enabled = 0; +} + +enum kmemcheck_method { + KMEMCHECK_READ, + KMEMCHECK_WRITE, +}; + +static void 
kmemcheck_access(struct pt_regs *regs, + unsigned long fallback_address, enum kmemcheck_method fallback_method) +{ + const uint8_t *insn; + const uint8_t *insn_primary; + unsigned int size; + + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + /* Recursive fault -- ouch. */ + if (data->busy) { + kmemcheck_show_addr(fallback_address); + kmemcheck_error_save_bug(regs); + return; + } + + data->busy = true; + + insn = (const uint8_t *) regs->ip; + insn_primary = kmemcheck_opcode_get_primary(insn); + + kmemcheck_opcode_decode(insn, &size); + + switch (insn_primary[0]) { +#ifdef CONFIG_KMEMCHECK_BITOPS_OK + /* AND, OR, XOR */ + /* + * Unfortunately, these instructions have to be excluded from + * our regular checking since they access only some (and not + * all) bits. This clears out "bogus" bitfield-access warnings. + */ + case 0x80: + case 0x81: + case 0x82: + case 0x83: + switch ((insn_primary[1] >> 3) & 7) { + /* OR */ + case 1: + /* AND */ + case 4: + /* XOR */ + case 6: + kmemcheck_write(regs, fallback_address, size); + goto out; + + /* ADD */ + case 0: + /* ADC */ + case 2: + /* SBB */ + case 3: + /* SUB */ + case 5: + /* CMP */ + case 7: + break; + } + break; +#endif + + /* MOVS, MOVSB, MOVSW, MOVSD */ + case 0xa4: + case 0xa5: + /* + * These instructions are special because they take two + * addresses, but we only get one page fault. + */ + kmemcheck_copy(regs, regs->si, regs->di, size); + goto out; + + /* CMPS, CMPSB, CMPSW, CMPSD */ + case 0xa6: + case 0xa7: + kmemcheck_read(regs, regs->si, size); + kmemcheck_read(regs, regs->di, size); + goto out; + } + + /* + * If the opcode isn't special in any way, we use the data from the + * page fault handler to determine the address and type of memory + * access. + */ + switch (fallback_method) { + case KMEMCHECK_READ: + kmemcheck_read(regs, fallback_address, size); + goto out; + case KMEMCHECK_WRITE: + kmemcheck_write(regs, fallback_address, size); + goto out; + } + +out: + data->busy = false; +} + +bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code) +{ + pte_t *pte; + + /* + * XXX: Is it safe to assume that memory accesses from virtual 86 + * mode or non-kernel code segments will _never_ access kernel + * memory (e.g. tracked pages)? For now, we need this to avoid + * invoking kmemcheck for PnP BIOS calls. + */ + if (regs->flags & X86_VM_MASK) + return false; + if (regs->cs != __KERNEL_CS) + return false; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return false; + + if (error_code & 2) + kmemcheck_access(regs, address, KMEMCHECK_WRITE); + else + kmemcheck_access(regs, address, KMEMCHECK_READ); + + kmemcheck_show(regs); + return true; +} + +bool kmemcheck_trap(struct pt_regs *regs) +{ + if (!kmemcheck_active(regs)) + return false; + + /* We're done. 
*/ + kmemcheck_hide(regs); + return true; +} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c new file mode 100644 index 00000000000..63c19e27aa6 --- /dev/null +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -0,0 +1,106 @@ +#include <linux/types.h> + +#include "opcode.h" + +static bool opcode_is_prefix(uint8_t b) +{ + return + /* Group 1 */ + b == 0xf0 || b == 0xf2 || b == 0xf3 + /* Group 2 */ + || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 + || b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e + /* Group 3 */ + || b == 0x66 + /* Group 4 */ + || b == 0x67; +} + +#ifdef CONFIG_X86_64 +static bool opcode_is_rex_prefix(uint8_t b) +{ + return (b & 0xf0) == 0x40; +} +#else +static bool opcode_is_rex_prefix(uint8_t b) +{ + return false; +} +#endif + +#define REX_W (1 << 3) + +/* + * This is a VERY crude opcode decoder. We only need to find the size of the + * load/store that caused our #PF and this should work for all the opcodes + * that we care about. Moreover, the ones who invented this instruction set + * should be shot. + */ +void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size) +{ + /* Default operand size */ + int operand_size_override = 4; + + /* prefixes */ + for (; opcode_is_prefix(*op); ++op) { + if (*op == 0x66) + operand_size_override = 2; + } + + /* REX prefix */ + if (opcode_is_rex_prefix(*op)) { + uint8_t rex = *op; + + ++op; + if (rex & REX_W) { + switch (*op) { + case 0x63: + *size = 4; + return; + case 0x0f: + ++op; + + switch (*op) { + case 0xb6: + case 0xbe: + *size = 1; + return; + case 0xb7: + case 0xbf: + *size = 2; + return; + } + + break; + } + + *size = 8; + return; + } + } + + /* escape opcode */ + if (*op == 0x0f) { + ++op; + + /* + * This is move with zero-extend and sign-extend, respectively; + * we don't have to think about 0xb6/0xbe, because this is + * already handled in the conditional below. + */ + if (*op == 0xb7 || *op == 0xbf) + operand_size_override = 2; + } + + *size = (*op & 1) ? 
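/* low opcode bit is the x86 "w" bit: set means a full-size operand, clear means a single byte */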
operand_size_override : 1; +} + +const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) +{ + /* skip prefixes */ + while (opcode_is_prefix(*op)) + ++op; + if (opcode_is_rex_prefix(*op)) + ++op; + return op; +} diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h new file mode 100644 index 00000000000..6956aad66b5 --- /dev/null +++ b/arch/x86/mm/kmemcheck/opcode.h @@ -0,0 +1,9 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H +#define ARCH__X86__MM__KMEMCHECK__OPCODE_H + +#include <linux/types.h> + +void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size); +const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); + +#endif diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c new file mode 100644 index 00000000000..4ead26eeaf9 --- /dev/null +++ b/arch/x86/mm/kmemcheck/pte.c @@ -0,0 +1,22 @@ +#include <linux/mm.h> + +#include <asm/pgtable.h> + +#include "pte.h" + +pte_t *kmemcheck_pte_lookup(unsigned long address) +{ + pte_t *pte; + unsigned int level; + + pte = lookup_address(address, &level); + if (!pte) + return NULL; + if (level != PG_LEVEL_4K) + return NULL; + if (!pte_hidden(*pte)) + return NULL; + + return pte; +} + diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h new file mode 100644 index 00000000000..9f596645649 --- /dev/null +++ b/arch/x86/mm/kmemcheck/pte.h @@ -0,0 +1,10 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H +#define ARCH__X86__MM__KMEMCHECK__PTE_H + +#include <linux/mm.h> + +#include <asm/pgtable.h> + +pte_t *kmemcheck_pte_lookup(unsigned long address); + +#endif diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c new file mode 100644 index 00000000000..036efbea8b2 --- /dev/null +++ b/arch/x86/mm/kmemcheck/selftest.c @@ -0,0 +1,69 @@ +#include <linux/kernel.h> + +#include "opcode.h" +#include "selftest.h" + +struct selftest_opcode { + unsigned int expected_size; + const uint8_t *insn; + const char *desc; +}; + +static const struct selftest_opcode selftest_opcodes[] = { + /* REP MOVS */ + {1, "\xf3\xa4", "rep movsb <mem8>, <mem8>"}, + {4, "\xf3\xa5", "rep movsl <mem32>, <mem32>"}, + + /* MOVZX / MOVZXD */ + {1, "\x66\x0f\xb6\x51\xf8", "movzwq <mem8>, <reg16>"}, + {1, "\x0f\xb6\x51\xf8", "movzwq <mem8>, <reg32>"}, + + /* MOVSX / MOVSXD */ + {1, "\x66\x0f\xbe\x51\xf8", "movswq <mem8>, <reg16>"}, + {1, "\x0f\xbe\x51\xf8", "movswq <mem8>, <reg32>"}, + +#ifdef CONFIG_X86_64 + /* MOVZX / MOVZXD */ + {1, "\x49\x0f\xb6\x51\xf8", "movzbq <mem8>, <reg64>"}, + {2, "\x49\x0f\xb7\x51\xf8", "movzbq <mem16>, <reg64>"}, + + /* MOVSX / MOVSXD */ + {1, "\x49\x0f\xbe\x51\xf8", "movsbq <mem8>, <reg64>"}, + {2, "\x49\x0f\xbf\x51\xf8", "movsbq <mem16>, <reg64>"}, + {4, "\x49\x63\x51\xf8", "movslq <mem32>, <reg64>"}, +#endif +}; + +static bool selftest_opcode_one(const struct selftest_opcode *op) +{ + unsigned size; + + kmemcheck_opcode_decode(op->insn, &size); + + if (size == op->expected_size) + return true; + + printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n", + op->desc, op->expected_size, size); + return false; +} + +static bool selftest_opcodes_all(void) +{ + bool pass = true; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i) + pass = pass && selftest_opcode_one(&selftest_opcodes[i]); + + return pass; +} + +bool kmemcheck_selftest(void) +{ + bool pass = true; + + pass = pass && selftest_opcodes_all(); + + return pass; +} diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h new file mode 100644 index 
00000000000..8fed4fe11f9 --- /dev/null +++ b/arch/x86/mm/kmemcheck/selftest.h @@ -0,0 +1,6 @@ +#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H +#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H + +bool kmemcheck_selftest(void); + +#endif diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c new file mode 100644 index 00000000000..e773b6bd007 --- /dev/null +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -0,0 +1,162 @@ +#include <linux/kmemcheck.h> +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/module.h> + +#include <asm/page.h> +#include <asm/pgtable.h> + +#include "pte.h" +#include "shadow.h" + +/* + * Return the shadow address for the given address. Returns NULL if the + * address is not tracked. + * + * We need to be extremely careful not to follow any invalid pointers, + * because this function can be called for *any* possible address. + */ +void *kmemcheck_shadow_lookup(unsigned long address) +{ + pte_t *pte; + struct page *page; + + if (!virt_addr_valid(address)) + return NULL; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return NULL; + + page = virt_to_page(address); + if (!page->shadow) + return NULL; + return page->shadow + (address & (PAGE_SIZE - 1)); +} + +static void mark_shadow(void *address, unsigned int n, + enum kmemcheck_shadow status) +{ + unsigned long addr = (unsigned long) address; + unsigned long last_addr = addr + n - 1; + unsigned long page = addr & PAGE_MASK; + unsigned long last_page = last_addr & PAGE_MASK; + unsigned int first_n; + void *shadow; + + /* If the memory range crosses a page boundary, stop there. */ + if (page == last_page) + first_n = n; + else + first_n = page + PAGE_SIZE - addr; + + shadow = kmemcheck_shadow_lookup(addr); + if (shadow) + memset(shadow, status, first_n); + + addr += first_n; + n -= first_n; + + /* Do full-page memset()s. */ + while (n >= PAGE_SIZE) { + shadow = kmemcheck_shadow_lookup(addr); + if (shadow) + memset(shadow, status, PAGE_SIZE); + + addr += PAGE_SIZE; + n -= PAGE_SIZE; + } + + /* Do the remaining page, if any. */ + if (n > 0) { + shadow = kmemcheck_shadow_lookup(addr); + if (shadow) + memset(shadow, status, n); + } +} + +void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); +} + +void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); +} + +/* + * Fill the shadow memory of the given address such that the memory at that + * address is marked as being initialized. 
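This is the variant exported to modules below, since drivers may need to annotate buffers that are written by hardware (e.g. DMA) and therefore look uninitialized to kmemcheck.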
+ */ +void kmemcheck_mark_initialized(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); +} +EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); + +void kmemcheck_mark_freed(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); +} + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); +} + +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); +} + +void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE); +} + +enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) +{ + uint8_t *x; + unsigned int i; + + x = shadow; + +#ifdef CONFIG_KMEMCHECK_PARTIAL_OK + /* + * Make sure _some_ bytes are initialized. Gcc frequently generates + * code to access neighboring bytes. + */ + for (i = 0; i < size; ++i) { + if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) + return x[i]; + } +#else + /* All bytes must be initialized. */ + for (i = 0; i < size; ++i) { + if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) + return x[i]; + } +#endif + + return x[0]; +} + +void kmemcheck_shadow_set(void *shadow, unsigned int size) +{ + uint8_t *x; + unsigned int i; + + x = shadow; + for (i = 0; i < size; ++i) + x[i] = KMEMCHECK_SHADOW_INITIALIZED; +} diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h new file mode 100644 index 00000000000..af46d9ab9d8 --- /dev/null +++ b/arch/x86/mm/kmemcheck/shadow.h @@ -0,0 +1,16 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H +#define ARCH__X86__MM__KMEMCHECK__SHADOW_H + +enum kmemcheck_shadow { + KMEMCHECK_SHADOW_UNALLOCATED, + KMEMCHECK_SHADOW_UNINITIALIZED, + KMEMCHECK_SHADOW_INITIALIZED, + KMEMCHECK_SHADOW_FREED, +}; + +void *kmemcheck_shadow_lookup(unsigned long address); + +enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size); +void kmemcheck_shadow_set(void *shadow, unsigned int size); + +#endif diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 6ce9518fe2a..3cfe9ced8a4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -470,7 +470,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) if (!debug_pagealloc) spin_unlock(&cpa_lock); - base = alloc_pages(GFP_KERNEL, 0); + base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0); if (!debug_pagealloc) spin_lock(&cpa_lock); if (!base) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 7aa03a5389f..8e43bdd4545 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -4,9 +4,11 @@ #include <asm/tlb.h> #include <asm/fixmap.h> +#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO + pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); + return (pte_t *)__get_free_page(PGALLOC_GFP); } pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) @@ -14,9 +16,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte; #ifdef CONFIG_HIGHPTE - pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0); + pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0); #else - pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); + 
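/* PGALLOC_GFP carries __GFP_NOTRACK so kmemcheck does not track page-table pages, which the #PF path itself has to walk. */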
pte = alloc_pages(PGALLOC_GFP, 0); #endif if (pte) pgtable_page_ctor(pte); @@ -161,7 +163,7 @@ static int preallocate_pmds(pmd_t *pmds[]) bool failed = false; for(i = 0; i < PREALLOCATED_PMDS; i++) { - pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); if (pmd == NULL) failed = true; pmds[i] = pmd; @@ -228,7 +230,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *pmds[PREALLOCATED_PMDS]; unsigned long flags; - pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); if (pgd == NULL) goto out; diff --git a/arch/xtensa/include/asm/kmap_types.h b/arch/xtensa/include/asm/kmap_types.h index 9e822d2e3bc..11c687e527f 100644 --- a/arch/xtensa/include/asm/kmap_types.h +++ b/arch/xtensa/include/asm/kmap_types.h @@ -1,31 +1,6 @@ -/* - * include/asm-xtensa/kmap_types.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - */ - #ifndef _XTENSA_KMAP_TYPES_H #define _XTENSA_KMAP_TYPES_H -enum km_type { - KM_BOUNCE_READ, - KM_SKB_SUNRPC_DATA, - KM_SKB_DATA_SOFTIRQ, - KM_USER0, - KM_USER1, - KM_BIO_SRC_IRQ, - KM_BIO_DST_IRQ, - KM_PTE0, - KM_PTE1, - KM_IRQ0, - KM_IRQ1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, - KM_TYPE_NR -}; +#include <asm-generic/kmap_types.h> #endif /* _XTENSA_KMAP_TYPES_H */ diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c index e07f5c9fcd3..c4302f0e4ba 100644 --- a/arch/xtensa/kernel/init_task.c +++ b/arch/xtensa/kernel/init_task.c @@ -23,10 +23,6 @@ static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct mm_struct init_mm = INIT_MM(init_mm); - -EXPORT_SYMBOL(init_mm); - union thread_union init_thread_union __attribute__((__section__(".data.init_task"))) = { INIT_THREAD_INFO(init_task) }; |
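
The CONFIG_KMEMCHECK_PARTIAL_OK policy in kmemcheck_shadow_test() above is easiest to see with a concrete input. Below is a minimal userspace sketch (an editor's illustration, not part of the patch; shadow_test_strict(), shadow_test_partial() and the SHADOW_* constants are stand-ins for the kernel's kmemcheck_shadow_test() and KMEMCHECK_SHADOW_* values):

/*
 * Editor's sketch: userspace model of the two kmemcheck_shadow_test()
 * policies.  A 4-byte read where only one byte was ever written passes
 * under the PARTIAL_OK policy but is reported under the strict one.
 */
#include <stdio.h>
#include <stdint.h>

enum shadow {
	SHADOW_UNALLOCATED,
	SHADOW_UNINITIALIZED,
	SHADOW_INITIALIZED,
	SHADOW_FREED,
};

/* Strict: every byte of the access must be initialized. */
static enum shadow shadow_test_strict(const uint8_t *x, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; ++i) {
		if (x[i] != SHADOW_INITIALIZED)
			return x[i];
	}
	return x[0];
}

/* PARTIAL_OK: one initialized byte is enough (gcc often reads neighbours). */
static enum shadow shadow_test_partial(const uint8_t *x, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; ++i) {
		if (x[i] == SHADOW_INITIALIZED)
			return x[i];
	}
	return x[0];
}

int main(void)
{
	/* Shadow of a 4-byte access; only the second byte was written. */
	const uint8_t shadow[4] = {
		SHADOW_UNINITIALIZED, SHADOW_INITIALIZED,
		SHADOW_UNINITIALIZED, SHADOW_UNINITIALIZED,
	};

	printf("strict  policy: %s\n",
	       shadow_test_strict(shadow, 4) == SHADOW_INITIALIZED ?
	       "access ok" : "would be reported");
	printf("partial policy: %s\n",
	       shadow_test_partial(shadow, 4) == SHADOW_INITIALIZED ?
	       "access ok" : "would be reported");
	return 0;
}

Compiled and run, the strict policy reports the 4-byte access (three shadow bytes are still uninitialized) while the PARTIAL_OK policy accepts it, which is exactly the trade-off the kernel config option describes.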