Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c         |  9
-rw-r--r--  arch/ia64/kernel/iosapic.c      |  6
-rw-r--r--  arch/ia64/kernel/irq.c          |  4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c     | 24
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c   |  2
-rw-r--r--  arch/ia64/kernel/mca_drv.c      | 95
-rw-r--r--  arch/ia64/kernel/pal.S          | 58
-rw-r--r--  arch/ia64/kernel/perfmon.c      |  3
-rw-r--r--  arch/ia64/kernel/sal.c          | 11
-rw-r--r--  arch/ia64/kernel/setup.c        |  2
-rw-r--r--  arch/ia64/kernel/smp.c          | 12
-rw-r--r--  arch/ia64/kernel/time.c         |  6
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S  |  8
13 files changed, 146 insertions(+), 94 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 32c3abededc..73ef4a85b86 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -64,9 +64,6 @@ EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 EXPORT_SYMBOL(pm_power_off);
 
-unsigned char acpi_kbd_controller_present = 1;
-unsigned char acpi_legacy_devices;
-
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -628,12 +625,6 @@ static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
 
 	fadt = (struct fadt_descriptor *)fadt_header;
 
-	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
-		acpi_kbd_controller_present = 0;
-
-	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
-		acpi_legacy_devices = 1;
-
 	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
 	return 0;
 }
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 9bf15fefa7e..60d64950e3c 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -426,7 +426,7 @@ iosapic_end_level_irq (unsigned int irq)
 #define iosapic_ack_level_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_level = {
-	.typename =	"IO-SAPIC-level",
+	.name =		"IO-SAPIC-level",
 	.startup =	iosapic_startup_level_irq,
 	.shutdown =	iosapic_shutdown_level_irq,
 	.enable =	iosapic_enable_level_irq,
@@ -473,7 +473,7 @@ iosapic_ack_edge_irq (unsigned int irq)
 #define iosapic_end_edge_irq		nop
 
 struct hw_interrupt_type irq_type_iosapic_edge = {
-	.typename =	"IO-SAPIC-edge",
+	.name =		"IO-SAPIC-edge",
 	.startup =	iosapic_startup_edge_irq,
 	.shutdown =	iosapic_disable_edge_irq,
 	.enable =	iosapic_enable_edge_irq,
@@ -664,7 +664,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
 			printk(KERN_WARNING
 			       "%s: changing vector %d from %s to %s\n",
 			       __FUNCTION__, vector,
-			       idesc->chip->typename, irq_type->typename);
+			       idesc->chip->name, irq_type->name);
 		idesc->chip = irq_type;
 	}
 	return 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f07c0864b0b..54d55e4d64f 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 		}
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -197,7 +197,7 @@ void fixup_irqs(void)
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
 			vectors_in_migration[irq]=0;
-			__do_IRQ(irq);
+			generic_handle_irq(irq);
 			set_irq_regs(old_regs);
 		}
 	}
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 68339dd0c9e..ba3ba8bc50b 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -180,11 +180,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (!IS_RESCHEDULE(vector)) {
+		if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
+		else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
 
-			__do_IRQ(local_vector_to_irq(vector));
+			generic_handle_irq(local_vector_to_irq(vector));
 
 			/*
 			 * Disable interrupts and send EOI:
@@ -225,7 +227,9 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (!IS_RESCHEDULE(vector)) {
+		if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
+		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
@@ -238,7 +242,7 @@ void ia64_process_pending_intr(void)
 			 * Probably could shared code.
 			 */
 			vectors_in_migration[local_vector_to_irq(vector)]=0;
-			__do_IRQ(local_vector_to_irq(vector));
+			generic_handle_irq(local_vector_to_irq(vector));
 			set_irq_regs(old_regs);
 
 			/*
@@ -258,11 +262,22 @@ void ia64_process_pending_intr(void)
 #ifdef CONFIG_SMP
 	extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
+static irqreturn_t dummy_handler (int irq, void *dev_id)
+{
+	BUG();
+}
+
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
 	.flags =	IRQF_DISABLED,
 	.name =		"IPI"
 };
+
+static struct irqaction resched_irqaction = {
+	.handler =	dummy_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"resched"
+};
 #endif
 
 void
@@ -287,6 +302,7 @@ init_IRQ (void)
 	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index 1ab58b09f3d..c2f07beb175 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -34,7 +34,7 @@ static int lsapic_retrigger(unsigned int irq)
 }
 
 struct hw_interrupt_type irq_type_ia64_lsapic = {
-	.typename =	"LSAPIC",
+	.name =		"LSAPIC",
 	.startup =	lsapic_noop_startup,
 	.shutdown =	lsapic_noop,
 	.enable =	lsapic_noop,
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index a45009d2bc9..afc1403799c 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -435,6 +435,50 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 }
 
 /**
+ * get_target_identifier - Get the valid Cache or Bus check target identifier.
+ * @peidx: pointer of index of processor error section
+ *
+ * Return value:
+ *	target address on Success / 0 on Failue
+ */
+static u64
+get_target_identifier(peidx_table_t *peidx)
+{
+	u64 target_address = 0;
+	sal_log_mod_error_info_t *smei;
+	pal_cache_check_info_t *pcci;
+	int i, level = 9;
+
+	/*
+	 * Look through the cache checks for a valid target identifier
+	 * If more than one valid target identifier, return the one
+	 * with the lowest cache level.
+	 */
+	for (i = 0; i < peidx_cache_check_num(peidx); i++) {
+		smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
+		if (smei->valid.target_identifier && smei->target_identifier) {
+			pcci = (pal_cache_check_info_t *)&(smei->check_info);
+			if (!target_address || (pcci->level < level)) {
+				target_address = smei->target_identifier;
+				level = pcci->level;
+				continue;
+			}
+		}
+	}
+	if (target_address)
+		return target_address;
+
+	/*
+	 * Look at the bus check for a valid target identifier
+	 */
+	smei = peidx_bus_check(peidx, 0);
+	if (smei && smei->valid.target_identifier)
+		return smei->target_identifier;
+
+	return 0;
+}
+
+/**
  * recover_from_read_error - Try to recover the errors which type are "read"s.
  * @slidx: pointer of index of SAL error record
  * @peidx: pointer of index of processor error section
@@ -450,13 +494,14 @@ recover_from_read_error(slidx_table_t *slidx,
 			peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			struct ia64_sal_os_state *sos)
 {
-	sal_log_mod_error_info_t *smei;
+	u64 target_identifier;
 	pal_min_state_area_t *pmsa;
 	struct ia64_psr *psr1, *psr2;
 	ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
 
 	/* Is target address valid? */
-	if (!pbci->tv)
+	target_identifier = get_target_identifier(peidx);
+	if (!target_identifier)
 		return fatal_mca("target address not valid");
 
 	/*
@@ -487,32 +532,28 @@ recover_from_read_error(slidx_table_t *slidx,
 	pmsa = sos->pal_min_state;
 	if (psr1->cpl != 0 ||
 	   ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
-		smei = peidx_bus_check(peidx, 0);
-		if (smei->valid.target_identifier) {
-			/*
-			 * setup for resume to bottom half of MCA,
-			 * "mca_handler_bhhook"
-			 */
-			/* pass to bhhook as argument (gr8, ...) */
-			pmsa->pmsa_gr[8-1] = smei->target_identifier;
-			pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
-			pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
-			/* set interrupted return address (but no use) */
-			pmsa->pmsa_br0 = pmsa->pmsa_iip;
-			/* change resume address to bottom half */
-			pmsa->pmsa_iip = mca_hdlr_bh->fp;
-			pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
-			/* set cpl with kernel mode */
-			psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
-			psr2->cpl = 0;
-			psr2->ri = 0;
-			psr2->bn = 1;
-			psr2->i = 0;
-
-			return mca_recovered("user memory corruption. "
+		/*
+		 * setup for resume to bottom half of MCA,
+		 * "mca_handler_bhhook"
+		 */
+		/* pass to bhhook as argument (gr8, ...) */
+		pmsa->pmsa_gr[8-1] = target_identifier;
+		pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+		pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
+		/* set interrupted return address (but no use) */
+		pmsa->pmsa_br0 = pmsa->pmsa_iip;
+		/* change resume address to bottom half */
+		pmsa->pmsa_iip = mca_hdlr_bh->fp;
+		pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
+		/* set cpl with kernel mode */
+		psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
+		psr2->cpl = 0;
+		psr2->ri = 0;
+		psr2->bn = 1;
+		psr2->i = 0;
+
+		return mca_recovered("user memory corruption. "
 				"kill affected process - recovered.");
-		}
-	}
 
 	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index ebaf1e685f5..0b533441c3c 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -21,11 +21,12 @@ pal_entry_point:
 	.text
 
 /*
- * Set the PAL entry point address. This could be written in C code, but we do it here
- * to keep it all in one module (besides, it's so trivial that it's
+ * Set the PAL entry point address. This could be written in C code, but we
+ * do it here to keep it all in one module (besides, it's so trivial that it's
  * not a big deal).
  *
- * in0		Address of the PAL entry point (text address, NOT a function descriptor).
+ * in0		Address of the PAL entry point (text address, NOT a function
+ *		descriptor).
  */
 GLOBAL_ENTRY(ia64_pal_handler_init)
 	alloc r3=ar.pfs,1,0,0,0
@@ -36,9 +37,9 @@ GLOBAL_ENTRY(ia64_pal_handler_init)
 END(ia64_pal_handler_init)
 
 /*
- * Default PAL call handler. This needs to be coded in assembly because it uses
- * the static calling convention, i.e., the RSE may not be used and calls are
- * done via "br.cond" (not "br.call").
+ * Default PAL call handler. This needs to be coded in assembly because it
+ * uses the static calling convention, i.e., the RSE may not be used and
+ * calls are done via "br.cond" (not "br.call").
  */
 GLOBAL_ENTRY(ia64_pal_default_handler)
 	mov r8=-1
@@ -50,12 +51,10 @@ END(ia64_pal_default_handler)
  *
  * in0		Index of PAL service
  * in1 - in3	Remaining PAL arguments
- * in4		1 ==> clear psr.ic,  0 ==> don't clear psr.ic
- *
  */
 GLOBAL_ENTRY(ia64_pal_call_static)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-	alloc loc1 = ar.pfs,5,5,0,0
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(4)
+	alloc loc1 = ar.pfs,4,5,0,0
 	movl loc2 = pal_entry_point
 1:	{
 	  mov r28 = in0
@@ -64,7 +63,6 @@ GLOBAL_ENTRY(ia64_pal_call_static)
 	}
 	;;
 	ld8 loc2 = [loc2]		// loc2 <- entry point
-	tbit.nz p6,p7 = in4, 0
 	adds r8 = 1f-1b,r8
 	mov loc4=ar.rsc			// save RSE configuration
 	;;
@@ -74,13 +72,11 @@ GLOBAL_ENTRY(ia64_pal_call_static)
 	.body
 	mov r30 = in2
 
-(p6)	rsm psr.i | psr.ic
 	mov r31 = in3
 	mov b7 = loc2
 
-(p7)	rsm psr.i
+	rsm psr.i
 	;;
-(p6)	srlz.i
 	mov rp = r8
 	br.cond.sptk.many b7
 1:	mov psr.l = loc3
@@ -96,8 +92,8 @@ END(ia64_pal_call_static)
  * Make a PAL call using the stacked registers calling convention.
  *
  * Inputs:
- * 	in0         Index of PAL service
- * 	in2 - in3   Remaning PAL arguments
+ * 	in0	Index of PAL service
+ * 	in2 - in3	Remaining PAL arguments
  */
 GLOBAL_ENTRY(ia64_pal_call_stacked)
@@ -131,18 +127,18 @@ END(ia64_pal_call_stacked)
  * Make a physical mode PAL call using the static registers calling convention.
  *
  * Inputs:
- * 	in0         Index of PAL service
- * 	in2 - in3   Remaning PAL arguments
+ * 	in0	Index of PAL service
+ * 	in2 - in3	Remaining PAL arguments
  *
  * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
  * So we don't need to clear them.
  */
-#define PAL_PSR_BITS_TO_CLEAR						\
-	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
-	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
+#define PAL_PSR_BITS_TO_CLEAR						      \
+	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT |\
+	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	      \
 	 IA64_PSR_DFL | IA64_PSR_DFH)
 
-#define PAL_PSR_BITS_TO_SET						\
+#define PAL_PSR_BITS_TO_SET						      \
 	(IA64_PSR_BN)
 
 
@@ -178,7 +174,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
 	;;
 	andcm r16=loc3,r16		// removes bits to clear from psr
 	br.call.sptk.many rp=ia64_switch_mode_phys
-.ret1:	mov rp = r8			// install return address (physical)
+	mov rp = r8			// install return address (physical)
 	mov loc5 = r19
 	mov loc6 = r20
 	br.cond.sptk.many b7
@@ -188,7 +184,6 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
 	mov r19=loc5
 	mov r20=loc6
 	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret2:
 	mov psr.l = loc3		// restore init PSR
 
 	mov ar.pfs = loc1
@@ -203,8 +198,8 @@ END(ia64_pal_call_phys_static)
  * Make a PAL call using the stacked registers in physical mode.
  *
  * Inputs:
- * 	in0         Index of PAL service
- * 	in2 - in3   Remaning PAL arguments
+ * 	in0	Index of PAL service
+ * 	in2 - in3	Remaining PAL arguments
  */
 GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
@@ -212,7 +207,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	movl	loc2 = pal_entry_point
 1:	{
 	  mov r28 = in0		// copy procedure index
-	  mov loc0 = rp		// save rp
+	  mov loc0 = rp			// save rp
 	}
 	.body
 	;;
@@ -245,7 +240,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	mov r16=loc3			// r16= original psr
 	mov r19=loc5
 	mov r20=loc6
-	br.call.sptk.many rp=ia64_switch_mode_virt	// return to virtual mode
+	br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
 	mov psr.l = loc3		// restore init PSR
 
 	mov ar.pfs = loc1
@@ -257,10 +252,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 END(ia64_pal_call_phys_stacked)
 
 /*
- * Save scratch fp scratch regs which aren't saved in pt_regs already (fp10-fp15).
+ * Save scratch fp scratch regs which aren't saved in pt_regs already
+ * (fp10-fp15).
  *
- * NOTE: We need to do this since firmware (SAL and PAL) may use any of the scratch
- * regs fp-low partition.
+ * NOTE: We need to do this since firmware (SAL and PAL) may use any of the
+ * scratch regs fp-low partition.
  *
  * Inputs:
  *	in0	Address of stack storage for fp regs
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 281004ff7b0..3aaede0d698 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5558,12 +5558,13 @@ report_spurious2:
 }
 
 static irqreturn_t
-pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_interrupt_handler(int irq, void *arg)
 {
 	unsigned long start_cycles, total_cycles;
 	unsigned long min, max;
 	int this_cpu;
 	int ret;
+	struct pt_regs *regs = get_irq_regs();
 
 	this_cpu = get_cpu();
 	if (likely(!pfm_alt_intr_handler)) {
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 642fdc7b969..20bad78b507 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -223,12 +223,13 @@ static void __init sal_desc_ap_wakeup(void *p) { }
  */
 static int sal_cache_flush_drops_interrupts;
 
-static void __init
+void __init
 check_sal_cache_flush (void)
 {
 	unsigned long flags;
 	int cpu;
-	u64 vector;
+	u64 vector, cache_type = 3;
+	struct ia64_sal_retval isrv;
 
 	cpu = get_cpu();
 	local_irq_save(flags);
@@ -243,7 +244,10 @@ check_sal_cache_flush (void)
 	while (!ia64_get_irr(IA64_TIMER_VECTOR))
 		cpu_relax();
 
-	ia64_sal_cache_flush(3);
+	SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
+
+	if (isrv.status)
+		printk(KERN_ERR "SAL_CAL_FLUSH failed with %ld\n", isrv.status);
 
 	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
 		vector = ia64_get_ivr();
@@ -331,7 +335,6 @@ ia64_sal_init (struct ia64_sal_systab *systab)
 		p += SAL_DESC_SIZE(*p);
 	}
 
-	check_sal_cache_flush();
 }
 
 int
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c4caa800349..d10404a4175 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -457,6 +457,8 @@ setup_arch (char **cmdline_p)
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
+	check_sal_cache_flush();
+
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 657ac99a451..6ab95ceaf9d 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -108,7 +108,7 @@ cpu_die(void)
 }
 
 irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id)
 {
 	int this_cpu = get_cpu();
 	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -328,10 +328,14 @@ int
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus()-1;
+	int cpus;
 
-	if (!cpus)
+	spin_lock(&call_lock);
+	cpus = num_online_cpus() - 1;
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -343,8 +347,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	spin_lock(&call_lock);
-
 	call_data = &data;
 	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
 	send_IPI_allbutself(IPI_CALL_FUNC);
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 41169a9bc30..39e0cd3a088 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -84,6 +84,12 @@ timer_interrupt (int irq, void *dev_id)
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
+
+		/*
+		 * Allow IPIs to interrupt the timer loop.
+		 */
+		local_irq_enable();
+		local_irq_disable();
 	}
 
 	do {
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index b3b2e389d6b..d6083a0936f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -128,13 +128,7 @@ SECTIONS
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
 	{
 	  __initcall_start = .;
-	  *(.initcall1.init)
-	  *(.initcall2.init)
-	  *(.initcall3.init)
-	  *(.initcall4.init)
-	  *(.initcall5.init)
-	  *(.initcall6.init)
-	  *(.initcall7.init)
+	  INITCALLS
 	  __initcall_end = .;
 	}