Diffstat (limited to 'arch/mips/cavium-octeon')
 arch/mips/cavium-octeon/dma-octeon.c              |  10
 arch/mips/cavium-octeon/executive/cvmx-bootmem.c  |   6
 arch/mips/cavium-octeon/executive/cvmx-sysinfo.c  |   1
 arch/mips/cavium-octeon/octeon-irq.c              | 160
 arch/mips/cavium-octeon/octeon-platform.c         |  85
 arch/mips/cavium-octeon/smp.c                     |   2
6 files changed, 211 insertions, 53 deletions
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 4b92bfc662d..be531ec1f20 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -41,7 +41,7 @@ struct bar1_index_state {
 };
 
 #ifdef CONFIG_PCI
-static DEFINE_SPINLOCK(bar1_lock);
+static DEFINE_RAW_SPINLOCK(bar1_lock);
 static struct bar1_index_state bar1_state[32];
 #endif
 
@@ -198,7 +198,7 @@ dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
 		start_index = 31;
 
 	/* Only one processor can access the Bar register at once */
-	spin_lock_irqsave(&bar1_lock, flags);
+	raw_spin_lock_irqsave(&bar1_lock, flags);
 
 	/* Look through Bar1 for existing mapping that will work */
 	for (index = start_index; index >= 0; index--) {
@@ -250,7 +250,7 @@ dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
 		 (unsigned long long) physical);
 
 done_unlock:
-	spin_unlock_irqrestore(&bar1_lock, flags);
+	raw_spin_unlock_irqrestore(&bar1_lock, flags);
 done:
 	pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
 	return result;
@@ -324,14 +324,14 @@ void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 		      "Attempt to unmap an invalid address (0x%llx)\n",
 		      dma_addr);
 
-	spin_lock_irqsave(&bar1_lock, flags);
+	raw_spin_lock_irqsave(&bar1_lock, flags);
 	bar1_state[index].ref_count--;
 	if (bar1_state[index].ref_count == 0)
 		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
 	else if (unlikely(bar1_state[index].ref_count < 0))
 		panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
 		      (int) index);
-	spin_unlock_irqrestore(&bar1_lock, flags);
+	raw_spin_unlock_irqrestore(&bar1_lock, flags);
 done:
 	pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
 	return;
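The dma-octeon.c hunks above convert bar1_lock from spinlock_t to raw_spinlock_t, so the BAR1 index state stays protected by a true spinning lock even on configurations (such as PREEMPT_RT) where ordinary spinlocks may sleep. A minimal sketch of the same pattern, using illustrative names that are not part of the patch:

#include <linux/spinlock.h>

/* Illustrative only: short critical section over hardware state that
 * may be reached from atomic context, so the lock must never sleep. */
static DEFINE_RAW_SPINLOCK(example_bar_lock);
static unsigned int example_bar_ref_count[32];

static void example_bar_get(unsigned int index)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_bar_lock, flags);
	example_bar_ref_count[index]++;
	raw_spin_unlock_irqrestore(&example_bar_lock, flags);
}

The calling convention is identical to the spin_lock_irqsave() family, which is why the conversion above is a mechanical rename of the lock type and its lock/unlock calls.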
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 25666da17b2..fdf5f19bfdb 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -253,7 +253,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
 	 * impossible requests up front. (NOP for address_min == 0)
 	 */
 	if (alignment)
-		address_min = __ALIGN_MASK(address_min, (alignment - 1));
+		address_min = ALIGN(address_min, alignment);
 
 	/*
 	 * Reject inconsistent args. We have adjusted these, so this
@@ -291,7 +291,7 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
 		 * satisfy request.
 		 */
 		usable_base =
-		    __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
+		    ALIGN(max(address_min, ent_addr), alignment);
 		usable_max = min(address_max, ent_addr + ent_size);
 		/*
 		 * We should be able to allocate block at address
@@ -671,7 +671,7 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
 	 * coallesced when they are freed. The alloc routine does the
 	 * same rounding up on all allocations.
 	 */
-	size = __ALIGN_MASK(size, (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1));
+	size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
 
 	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
 						alignment,
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
index e5838890cba..8b18a20cc7b 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -115,4 +115,3 @@ int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
 
 	return 1;
 }
-
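The cvmx-bootmem.c hunks above replace open-coded __ALIGN_MASK(x, a - 1) calls with ALIGN(x, a) from <linux/kernel.h>; both round x up to the next multiple of a power-of-two alignment a. A sketch of the equivalent arithmetic, using a made-up helper name for illustration:

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>

/* Round addr up to the next multiple of align (align must be a power
 * of two), e.g. example_align_up(0x1001, 0x1000) == 0x2000 and
 * example_align_up(0x1000, 0x1000) == 0x1000. */
static inline u64 example_align_up(u64 addr, u64 align)
{
	return (addr + align - 1) & ~(align - 1);
}

Passing the alignment value, rather than the mask, keeps the call sites in the bootmem allocator consistent with the alignment argument they already carry.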
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 6f2acf09328..c424cd158dc 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -13,9 +13,8 @@
 #include <asm/octeon/cvmx-pexp-defs.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 
-DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
-DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
-DEFINE_SPINLOCK(octeon_irq_msi_lock);
+static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
+static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
 
 static int octeon_coreid_for_cpu(int cpu)
 {
@@ -51,9 +50,6 @@ static void octeon_irq_core_eoi(unsigned int irq)
 	 */
 	if (desc->status & IRQ_DISABLED)
 		return;
-
-	/* There is a race here. We should fix it. */
-
 	/*
 	 * We don't need to disable IRQs to make these atomic since
 	 * they are already disabled earlier in the low level
@@ -141,19 +137,12 @@ static void octeon_irq_ciu0_enable(unsigned int irq)
 	uint64_t en0;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
-	/*
-	 * A read lock is used here to make sure only one core is ever
-	 * updating the CIU enable bits at a time. During an enable
-	 * the cores don't interfere with each other. During a disable
-	 * the write lock stops any enables that might cause a
-	 * problem.
-	 */
-	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
 	en0 |= 1ull << bit;
 	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 }
 
 static void octeon_irq_ciu0_disable(unsigned int irq)
@@ -162,7 +151,7 @@
 	unsigned long flags;
 	uint64_t en0;
 	int cpu;
-	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
@@ -174,7 +163,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
-	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 }
 
 /*
@@ -193,7 +182,7 @@ static void octeon_irq_ciu0_enable_v2(unsigned int irq)
  * Disable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu0_disable_v2(unsigned int irq)
+static void octeon_irq_ciu0_ack_v2(unsigned int irq)
 {
 	int index = cvmx_get_core_num() * 2;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
@@ -202,6 +191,43 @@
 }
 
 /*
+ * CIU timer type interrupts must be acknoleged by writing a '1' bit
+ * to their sum0 bit.
+ */
+static void octeon_irq_ciu0_timer_ack(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2;
+	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+}
+
+static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
+{
+	octeon_irq_ciu0_timer_ack(irq);
+	octeon_irq_ciu0_ack(irq);
+}
+
+static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
+{
+	octeon_irq_ciu0_timer_ack(irq);
+	octeon_irq_ciu0_ack_v2(irq);
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int index = cvmx_get_core_num() * 2;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+	if ((desc->status & IRQ_DISABLED) == 0)
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+}
+
+/*
  * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
  * registers.
  */
@@ -223,7 +249,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
-	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en0 =
@@ -239,7 +265,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
-	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 
 	return 0;
 }
@@ -272,8 +298,8 @@ static struct irq_chip octeon_irq_chip_ciu0_v2 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable_v2,
 	.disable = octeon_irq_ciu0_disable_all_v2,
-	.ack = octeon_irq_ciu0_disable_v2,
-	.eoi = octeon_irq_ciu0_enable_v2,
+	.ack = octeon_irq_ciu0_ack_v2,
+	.eoi = octeon_irq_ciu0_eoi_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
 #endif
@@ -290,6 +316,28 @@ static struct irq_chip octeon_irq_chip_ciu0 = {
 #endif
 };
 
+static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
+	.name = "CIU0-T",
+	.enable = octeon_irq_ciu0_enable_v2,
+	.disable = octeon_irq_ciu0_disable_all_v2,
+	.ack = octeon_irq_ciu0_timer_ack_v2,
+	.eoi = octeon_irq_ciu0_eoi_v2,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu0_timer = {
+	.name = "CIU0-T",
+	.enable = octeon_irq_ciu0_enable,
+	.disable = octeon_irq_ciu0_disable,
+	.ack = octeon_irq_ciu0_timer_ack_v1,
+	.eoi = octeon_irq_ciu0_eoi,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity,
+#endif
+};
+
 static void octeon_irq_ciu1_ack(unsigned int irq)
 {
@@ -322,19 +370,12 @@ static void octeon_irq_ciu1_enable(unsigned int irq)
 	uint64_t en1;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
-	/*
-	 * A read lock is used here to make sure only one core is ever
-	 * updating the CIU enable bits at a time. During an enable
-	 * the cores don't interfere with each other. During a disable
-	 * the write lock stops any enables that might cause a
-	 * problem.
-	 */
-	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
 	en1 |= 1ull << bit;
 	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 }
 
 static void octeon_irq_ciu1_disable(unsigned int irq)
@@ -343,7 +384,7 @@
 	unsigned long flags;
 	uint64_t en1;
 	int cpu;
-	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
@@ -355,7 +396,7 @@
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
-	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 }
 
 /*
@@ -374,7 +415,7 @@ static void octeon_irq_ciu1_enable_v2(unsigned int irq)
  * Disable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu1_disable_v2(unsigned int irq)
+static void octeon_irq_ciu1_ack_v2(unsigned int irq)
 {
 	int index = cvmx_get_core_num() * 2 + 1;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
@@ -383,6 +424,20 @@
 }
 
 /*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int index = cvmx_get_core_num() * 2 + 1;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+	if ((desc->status & IRQ_DISABLED) == 0)
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+}
+
+/*
  * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
  * registers.
  */
@@ -405,7 +460,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq,
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
-	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en1 =
@@ -422,7 +477,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq,
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
-	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 
 	return 0;
 }
@@ -455,8 +510,8 @@ static struct irq_chip octeon_irq_chip_ciu1_v2 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu1_enable_v2,
 	.disable = octeon_irq_ciu1_disable_all_v2,
-	.ack = octeon_irq_ciu1_disable_v2,
-	.eoi = octeon_irq_ciu1_enable_v2,
+	.ack = octeon_irq_ciu1_ack_v2,
+	.eoi = octeon_irq_ciu1_eoi_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
 #endif
@@ -475,6 +530,8 @@ static struct irq_chip octeon_irq_chip_ciu1 = {
 
 #ifdef CONFIG_PCI_MSI
 
+static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
+
 static void octeon_irq_msi_ack(unsigned int irq)
 {
 	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
@@ -515,12 +572,12 @@ static void octeon_irq_msi_enable(unsigned int irq)
 		 */
 		uint64_t en;
 		unsigned long flags;
-		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
 		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
 		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
 		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
 		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
 	}
 }
 
@@ -537,12 +594,12 @@ static void octeon_irq_msi_disable(unsigned int irq)
 		 */
 		uint64_t en;
 		unsigned long flags;
-		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
 		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
 		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
 		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
 		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
 	}
 }
 
@@ -559,6 +616,7 @@ void __init arch_init_irq(void)
 {
 	int irq;
 	struct irq_chip *chip0;
+	struct irq_chip *chip0_timer;
 	struct irq_chip *chip1;
 
 #ifdef CONFIG_SMP
@@ -574,9 +632,11 @@ void __init arch_init_irq(void)
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
 		chip0 = &octeon_irq_chip_ciu0_v2;
+		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
 		chip1 = &octeon_irq_chip_ciu1_v2;
 	} else {
 		chip0 = &octeon_irq_chip_ciu0;
+		chip0_timer = &octeon_irq_chip_ciu0_timer;
 		chip1 = &octeon_irq_chip_ciu1;
 	}
 
@@ -590,7 +650,21 @@ void __init arch_init_irq(void)
 
 	/* 24 - 87 CIU_INT_SUM0 */
 	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
+		switch (irq) {
+		case OCTEON_IRQ_GMX_DRP0:
+		case OCTEON_IRQ_GMX_DRP1:
+		case OCTEON_IRQ_IPD_DRP:
+		case OCTEON_IRQ_KEY_ZERO:
+		case OCTEON_IRQ_TIMER0:
+		case OCTEON_IRQ_TIMER1:
+		case OCTEON_IRQ_TIMER2:
+		case OCTEON_IRQ_TIMER3:
+			set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
+			break;
+		default:
+			set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
+			break;
+		}
 	}
 
 	/* 88 - 151 CIU_INT_SUM1 */
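The CIU0-T chips introduced above give the timer-type sources an ->ack() that also clears the latched bit in CIU_INTX_SUM0 (a write-1-to-clear register) and, on chips with the EN*_W1{S,C} registers, an ->eoi() that only re-enables the line if it was not disabled in the meantime. A rough sketch of the flow they plug into, loosely modelled on handle_percpu_irq() from kernel/irq/chip.c of this kernel generation (simplified, accounting and error handling omitted; not the real implementation):

/* Simplified illustration of the ack/handler/eoi sequence. */
static void example_percpu_flow(unsigned int irq, struct irq_desc *desc)
{
	/* For CIU0-T this masks the source and clears its latched bit in
	 * CIU_INTX_SUM0; without the write-1-to-clear the interrupt would
	 * fire again as soon as it is re-enabled. */
	if (desc->chip->ack)
		desc->chip->ack(irq);

	handle_IRQ_event(irq, desc->action);

	/* Re-enable on this core unless the irq was disabled while the
	 * handler ran, mirroring the IRQ_DISABLED test in
	 * octeon_irq_ciu0_eoi_v2(). */
	if (desc->chip->eoi)
		desc->chip->eoi(irq);
}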
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index cfdb4c2ac5c..62ac30eef5e 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
@@ -159,6 +160,90 @@ out:
 }
 device_initcall(octeon_rng_device_init);
 
+static struct i2c_board_info __initdata octeon_i2c_devices[] = {
+	{
+		I2C_BOARD_INFO("ds1337", 0x68),
+	},
+};
+
+static int __init octeon_i2c_devices_init(void)
+{
+	return i2c_register_board_info(0, octeon_i2c_devices,
+				       ARRAY_SIZE(octeon_i2c_devices));
+}
+arch_initcall(octeon_i2c_devices_init);
+
+#define OCTEON_I2C_IO_BASE 0x1180000001000ull
+#define OCTEON_I2C_IO_UNIT_OFFSET 0x200
+
+static struct octeon_i2c_data octeon_i2c_data[2];
+
+static int __init octeon_i2c_device_init(void)
+{
+	struct platform_device *pd;
+	int ret = 0;
+	int port, num_ports;
+
+	struct resource i2c_resources[] = {
+		{
+			.flags	= IORESOURCE_MEM,
+		}, {
+			.flags	= IORESOURCE_IRQ,
+		}
+	};
+
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
+		num_ports = 2;
+	else
+		num_ports = 1;
+
+	for (port = 0; port < num_ports; port++) {
+		octeon_i2c_data[port].sys_freq = octeon_get_clock_rate();
+		/*FIXME: should be examined. At the moment is set for 100Khz */
+		octeon_i2c_data[port].i2c_freq = 100000;
+
+		pd = platform_device_alloc("i2c-octeon", port);
+		if (!pd) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		pd->dev.platform_data = octeon_i2c_data + port;
+
+		i2c_resources[0].start =
+			OCTEON_I2C_IO_BASE + (port * OCTEON_I2C_IO_UNIT_OFFSET);
+		i2c_resources[0].end = i2c_resources[0].start + 0x1f;
+		switch (port) {
+		case 0:
+			i2c_resources[1].start = OCTEON_IRQ_TWSI;
+			i2c_resources[1].end = OCTEON_IRQ_TWSI;
+			break;
+		case 1:
+			i2c_resources[1].start = OCTEON_IRQ_TWSI2;
+			i2c_resources[1].end = OCTEON_IRQ_TWSI2;
+			break;
+		default:
+			BUG();
+		}
+
+		ret = platform_device_add_resources(pd,
+						    i2c_resources,
+						    ARRAY_SIZE(i2c_resources));
+		if (ret)
+			goto fail;
+
+		ret = platform_device_add(pd);
+		if (ret)
+			goto fail;
+	}
+	return ret;
+fail:
+	platform_device_put(pd);
+out:
+	return ret;
+}
+device_initcall(octeon_i2c_device_init);
+
 /* Octeon SMI/MDIO interface. */
 static int __init octeon_mdiobus_device_init(void)
 {
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index c198efdf583..51e980290ce 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -327,7 +327,7 @@ static void octeon_cpu_die(unsigned int cpu)
 			       avail_coremask);
 	}
 
-	pr_info("Reset core %d. Available Coremask = %x \n", coreid,
+	pr_info("Reset core %d. Available Coremask = %x\n", coreid,
 		avail_coremask);
 	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
 	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
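For context on the octeon-platform.c additions: the DS1337 entry is handed to the I2C core for bus 0 and will be bound by whichever RTC driver claims the "ds1337" id, while each "i2c-octeon" platform device carries one MEM resource (a 0x20-byte register window), one IRQ resource (TWSI or TWSI2) and a pointer to its octeon_i2c_data. A probe-side sketch of how a platform driver would consume them; this is illustrative only and not the actual i2c-octeon driver:

#include <linux/platform_device.h>

/* Assumes the octeon_i2c_data definition used by the patch is in scope. */
static int example_i2c_octeon_probe(struct platform_device *pdev)
{
	struct resource *res_mem, *res_irq;
	struct octeon_i2c_data *data = pdev->dev.platform_data;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_mem || !res_irq || !data)
		return -ENODEV;

	pr_info("i2c-octeon.%d: regs at 0x%llx, irq %d\n", pdev->id,
		(unsigned long long)res_mem->start, (int)res_irq->start);
	return 0;
}

static struct platform_driver example_i2c_octeon_driver = {
	.probe	= example_i2c_octeon_probe,
	.driver	= {
		/* Must match platform_device_alloc("i2c-octeon", port). */
		.name	= "i2c-octeon",
	},
};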