From d662ed26734473d4cb5f3d78cebfec8f9126e97c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 17:01:53 +1100 Subject: powerpc/perf_counter: Add perf_counter system call on powerpc ... with an empty/dummy asm/perf_counter.h so it builds. Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/perf_counter.h | 10 ++++++++++ arch/powerpc/include/asm/systbl.h | 1 + arch/powerpc/include/asm/unistd.h | 3 ++- 3 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/include/asm/perf_counter.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h new file mode 100644 index 00000000000..59530ae1d53 --- /dev/null +++ b/arch/powerpc/include/asm/perf_counter.h @@ -0,0 +1,10 @@ +/* + * Performance counter support - PowerPC-specific definitions. + * + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 803def23665..da300c4d288 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,3 +322,4 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) +SYSCALL(perf_counter_open) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index e07d0c76ed7..7cef5afe89d 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -341,10 +341,11 @@ #define __NR_dup3 316 #define __NR_pipe2 317 #define __NR_inotify_init1 318 +#define __NR_perf_counter_open 319 #ifdef __KERNEL__ -#define __NR_syscalls 319 +#define __NR_syscalls 320 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls -- cgit v1.2.3 From 93a6d3ce6962044fe9badf528fed46b455d58292 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 16:52:19 +1100 Subject: powerpc: Provide a way to defer perf counter work until interrupts are enabled Because 64-bit powerpc uses lazy (soft) interrupt disabling, it is possible for a performance monitor exception to come in when the kernel thinks interrupts are disabled (i.e. when they are soft-disabled but hard-enabled). In such a situation the performance monitor exception handler might have some processing to do (such as process wakeups) which can't be done in what is effectively an NMI handler. This provides a way to defer that work until interrupts get enabled, either in raw_local_irq_restore() or by returning from an interrupt handler to code that had interrupts enabled. We have a per-processor flag that indicates that there is work pending to do when interrupts subsequently get re-enabled. This flag is checked in the interrupt return path and in raw_local_irq_restore(), and if it is set, perf_counter_do_pending() is called to do the pending work. 
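As an illustration of the mechanism described above, here is a minimal sketch of the consumer side, i.e. how raw_local_irq_restore() can check the new paca flag and run the deferred work. This is a simplified sketch only (the corresponding arch/powerpc/kernel/irq.c change is not part of the diff shown here, and the real function has more to do); it uses just the helpers this patch introduces.

```c
#include <asm/hw_irq.h>
#include <asm/paca.h>

/* Sketch: the soft-enable path runs work deferred by a PMU exception. */
void raw_local_irq_restore(unsigned long en)
{
	get_paca()->soft_enabled = en;
	if (!en)
		return;

	/*
	 * We are soft-enabling interrupts: if a performance monitor
	 * exception arrived while we were soft-disabled, its wakeup work
	 * was deferred and the paca flag was set.  Run that work now.
	 */
	if (get_perf_counter_pending()) {
		set_perf_counter_pending(0);	/* clear the flag first */
		perf_counter_do_pending();	/* process the wakeups  */
	}

	/* ... the existing hard-enable handling follows here ... */
	__hard_irq_enable();
}
```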
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hw_irq.h | 31 +++++++++++++++++++++++++++++++ arch/powerpc/include/asm/paca.h | 1 + 2 files changed, 32 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index f75a5fc64d2..e10f151c3db 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -131,5 +131,36 @@ static inline int irqs_disabled_flags(unsigned long flags) */ struct hw_interrupt_type; +#ifdef CONFIG_PERF_COUNTERS +static inline unsigned long get_perf_counter_pending(void) +{ + unsigned long x; + + asm volatile("lbz %0,%1(13)" + : "=r" (x) + : "i" (offsetof(struct paca_struct, perf_counter_pending))); + return x; +} + +static inline void set_perf_counter_pending(int x) +{ + asm volatile("stb %0,%1(13)" : : + "r" (x), + "i" (offsetof(struct paca_struct, perf_counter_pending))); +} + +extern void perf_counter_do_pending(void); + +#else + +static inline unsigned long get_perf_counter_pending(void) +{ + return 0; +} + +static inline void set_perf_counter_pending(int x) {} +static inline void perf_counter_do_pending(void) {} +#endif /* CONFIG_PERF_COUNTERS */ + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_HW_IRQ_H */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 082b3aedf14..6ef05572301 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -99,6 +99,7 @@ struct paca_struct { u8 soft_enabled; /* irq soft-enable flag */ u8 hard_enabled; /* set if irqs are enabled in MSR */ u8 io_sync; /* writel() needs spin_unlock sync */ + u8 perf_counter_pending; /* PM interrupt while soft-disabled */ /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ -- cgit v1.2.3 From 4574910e5087085a1f330ff8373cee4503f5c77c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 9 Jan 2009 20:21:55 +1100 Subject: powerpc/perf_counter: Add generic support for POWER-family PMU hardware This provides the architecture-specific functions needed to access PMU hardware on the 64-bit PowerPC processors. It has been designed for the IBM POWER family (POWER 4/4+/5/5+/6 and PPC970) but will hopefully also suit other 64-bit PowerPC machines (although probably not Cell given how different it is in this area). This doesn't include back-ends for any specific processors. This implements a system which allows back-ends to express the constraints that their hardware has on what events can be counted simultaneously. The constraints are expressed as a 64-bit mask + 64-bit value for each event, and the encoding is capable of expressing the constraints arising from having a set of multiplexers feeding an event bus, with some events being available through multiple multiplexer settings, such as we get on POWER4 and PPC970. Furthermore, the back-end can supply alternative event codes for each event, and the constraint checking code will try all possible combinations of alternative event codes to try to find a combination that will fit. 
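To make the constraint machinery concrete, the sketch below shows the feasibility check the description implies, accumulating each event's (mask, value) pair and using add_fields/test_adder from struct power_pmu. It is a simplified, illustrative version (the function name and the flat masks[]/values[] arrays are assumptions; the in-kernel routine additionally iterates over the alternative event codes supplied by the back-end).

```c
#include <linux/types.h>

/* Returns 0 if the n_ev events can be scheduled together, -1 if not. */
static int check_constraints(u64 masks[], u64 values[], int n_ev,
			     u64 add_fields, u64 test_adder)
{
	u64 mask = 0, value = 0, nv;
	int i;

	for (i = 0; i < n_ev; ++i) {
		/* merge select fields, sum "add" fields */
		nv = (value | values[i]) + (value & values[i] & add_fields);

		/*
		 * Fail if a select field disagrees with what earlier events
		 * required, or if an add/NAND field just exceeded its limit
		 * (test_adder makes the overflow show up in the masked bit).
		 */
		if ((((nv + test_adder) ^ value) & mask) != 0 ||
		    (((nv + test_adder) ^ values[i]) & masks[i]) != 0)
			return -1;

		value = nv;
		mask |= masks[i];
	}
	return 0;
}
```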
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/perf_counter.h | 62 +++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 59530ae1d53..9d7ff6d7fb5 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -8,3 +8,65 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include + +#define MAX_HWCOUNTERS 8 +#define MAX_EVENT_ALTERNATIVES 8 + +/* + * This struct provides the constants and functions needed to + * describe the PMU on a particular POWER-family CPU. + */ +struct power_pmu { + int n_counter; + int max_alternatives; + u64 add_fields; + u64 test_adder; + int (*compute_mmcr)(unsigned int events[], int n_ev, + unsigned int hwc[], u64 mmcr[]); + int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); + int (*get_alternatives)(unsigned int event, unsigned int alt[]); + void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); + int n_generic; + int *generic_events; +}; + +extern struct power_pmu *ppmu; + +/* + * The power_pmu.get_constraint function returns a 64-bit value and + * a 64-bit mask that express the constraints between this event and + * other events. + * + * The value and mask are divided up into (non-overlapping) bitfields + * of three different types: + * + * Select field: this expresses the constraint that some set of bits + * in MMCR* needs to be set to a specific value for this event. For a + * select field, the mask contains 1s in every bit of the field, and + * the value contains a unique value for each possible setting of the + * MMCR* bits. The constraint checking code will ensure that two events + * that set the same field in their masks have the same value in their + * value dwords. + * + * Add field: this expresses the constraint that there can be at most + * N events in a particular class. A field of k bits can be used for + * N <= 2^(k-1) - 1. The mask has the most significant bit of the field + * set (and the other bits 0), and the value has only the least significant + * bit of the field set. In addition, the 'add_fields' and 'test_adder' + * in the struct power_pmu for this processor come into play. The + * add_fields value contains 1 in the LSB of the field, and the + * test_adder contains 2^(k-1) - 1 - N in the field. + * + * NAND field: this expresses the constraint that you may not have events + * in all of a set of classes. (For example, on PPC970, you can't select + * events from the FPU, ISU and IDU simultaneously, although any two are + * possible.) For N classes, the field is N+1 bits wide, and each class + * is assigned one bit from the least-significant N bits. The mask has + * only the most-significant bit set, and the value has only the bit + * for the event's class set. The test_adder has the least significant + * bit set in the field. + * + * If an event is not subject to the constraint expressed by a particular + * field, then it will have 0 in both the mask and value for that field. 
+ */ -- cgit v1.2.3 From f3dfd2656deb81a0addee4f4ceff66b50a387388 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 26 Feb 2009 22:43:46 +1100 Subject: perfcounters: fix a few minor cleanliness issues This fixes three issues noticed by Arnd Bergmann: - Add #ifdef __KERNEL__ and move some things around in perf_counter.h to make sure only the bits that userspace needs are exported to userspace. - Use __u64, __s64, __u32 types in the structs exported to userspace rather than u64, s64, u32. - Make the sys_perf_counter_open syscall available to the SPUs on Cell platforms. And one issue that I noticed in looking at the code again: - Wrap the perf_counter_open syscall with SYSCALL_DEFINE4 so we get the proper handling of int arguments on ppc64 (and some other 64-bit architectures). Reported-by: Arnd Bergmann Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/systbl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 4c8095f6bec..d312eec8abb 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -322,4 +322,4 @@ SYSCALL_SPU(epoll_create1) SYSCALL_SPU(dup3) SYSCALL_SPU(pipe2) SYSCALL(inotify_init1) -SYSCALL(perf_counter_open) +SYSCALL_SPU(perf_counter_open) -- cgit v1.2.3 From b6c5a71da1477d261bc36254fe1f20d32b57598d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 16 Mar 2009 21:00:00 +1100 Subject: perf_counter: abstract wakeup flag setting in core to fix powerpc build Impact: build fix for powerpc Commit bd753921015e7905 ("perf_counter: software counter event infrastructure") introduced a use of TIF_PERF_COUNTERS into the core perfcounter code. This breaks the build on powerpc because we use a flag in a per-cpu area to signal wakeups on powerpc rather than a thread_info flag, because the thread_info flags have to be manipulated with atomic operations and are thus slower than per-cpu flags. This fixes the problem by changing the core to use an abstracted set_perf_counter_pending() function, which is defined on x86 to set the TIF_PERF_COUNTERS flag and on powerpc to set the per-cpu flag (paca->perf_counter_pending). It changes the previous powerpc definition of set_perf_counter_pending to not take an argument and adds a clear_perf_counter_pending, so as to simplify the definition on x86. On x86, set_perf_counter_pending() is defined as a macro. Defining it as a static inline in arch/x86/include/asm/perf_counters.h causes compile failures because that header gets included early in core kernel headers, and the definitions of set_tsk_thread_flag etc. are therefore not available to it. (On powerpc this problem is avoided by defining set_perf_counter_pending etc. in asm/hw_irq.h.)
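At a core-code call site the abstraction reads roughly as follows (a hypothetical fragment, using only the powerpc helpers visible in this series); the caller no longer cares whether the architecture records the pending wakeup in a thread_info flag or in the paca.

```c
#include <asm/hw_irq.h>

/* Called from the PMU interrupt, which may behave like an NMI. */
static void note_deferred_wakeup(void)
{
	set_perf_counter_pending();	/* just mark work as pending */
}

/* Called later, from a context where interrupts are enabled. */
static void run_deferred_wakeup(void)
{
	if (get_perf_counter_pending()) {
		clear_perf_counter_pending();
		perf_counter_do_pending();
	}
}
```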
Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/hw_irq.h | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index b43076ff92c..cb32d571c9c 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -142,10 +142,17 @@ static inline unsigned long get_perf_counter_pending(void) return x; } -static inline void set_perf_counter_pending(int x) +static inline void set_perf_counter_pending(void) { asm volatile("stb %0,%1(13)" : : - "r" (x), + "r" (1), + "i" (offsetof(struct paca_struct, perf_counter_pending))); +} + +static inline void clear_perf_counter_pending(void) +{ + asm volatile("stb %0,%1(13)" : : + "r" (0), "i" (offsetof(struct paca_struct, perf_counter_pending))); } @@ -158,7 +165,8 @@ static inline unsigned long get_perf_counter_pending(void) return 0; } -static inline void set_perf_counter_pending(int x) {} +static inline void set_perf_counter_pending(void) {} +static inline void clear_perf_counter_pending(void) {} static inline void perf_counter_do_pending(void) {} #endif /* CONFIG_PERF_COUNTERS */ -- cgit v1.2.3 From 925d519ab82b6dd7aca9420d809ee83819c08db2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 30 Mar 2009 19:07:02 +0200 Subject: perf_counter: unify and fix delayed counter wakeup While going over the wakeup code I noticed delayed wakeups only work for hardware counters but basically all software counters rely on them. This patch unifies and generalizes the delayed wakeup to fix this issue. Since we're dealing with NMI context bits here, use a cmpxchg() based single link list implementation to track counters that have pending wakeups. [ This should really be generic code for delayed wakeups, but since we cannot use cmpxchg()/xchg() in generic code, I've let it live in the perf_counter code. -- Eric Dumazet could use it to aggregate the network wakeups. ] Furthermore, the x86 method of using TIF flags was flawed in that it's quite possible to end up setting the bit on the idle task, losing the wakeup. The powerpc method uses per-cpu storage and does appear to be sufficient. Signed-off-by: Peter Zijlstra Acked-by: Paul Mackerras Orig-LKML-Reference: <20090330171023.153932974@chello.nl> Signed-off-by: Ingo Molnar
(Run state is the state when a hardware RUN bit is 1; the idle task clears RUN while waiting for work to do and sets it when there is work to do.) These counters can't be written to by the kernel, can't generate interrupts, and don't obey the freeze conditions. That means we can only use them for per-task counters (where we know we'll always be in run state; we can't put a per-task counter on an idle task), and only if we don't want interrupts and we do want to count in all processor modes. Obviously some counters can't go on a limited hardware counter, but there are also situations where we can only put a counter on a limited hardware counter - if there are already counters on that exclude some processor modes and we want to put on a per-task cycle or instruction counter that doesn't exclude any processor mode, it could go on if it can use a limited hardware counter. To keep track of these constraints, this adds a flags argument to the processor-specific get_alternatives() functions, with three bits defined: one to say that we can accept alternative event codes that go on limited counters, one to say we only want alternatives on limited counters, and one to say that this is a per-task counter and therefore events that are gated by run state are equivalent to those that aren't (e.g. a "cycles" event is equivalent to a "cycles in run state" event). These flags are computed for each counter and stored in the counter->hw.counter_base field (slightly wonky name for what it does, but it was an existing unused field). Since the limited counters don't freeze when we freeze the other counters, we need some special handling to avoid getting skew between things counted on the limited counters and those counted on normal counters. To minimize this skew, if we are using any limited counters, we read PMC5 and PMC6 immediately after setting and clearing the freeze bit. This is done in a single asm in the new write_mmcr0() function. The code here is specific to PMC5 and PMC6 being the limited hardware counters. Being more general (e.g. having a bitmap of limited hardware counter numbers) would have meant more complex code to read the limited counters when freezing and unfreezing the normal counters, with conditional branches, which would have increased the skew. Since it isn't necessary for the code to be more general at this stage, it isn't. This also extends the back-ends for POWER5+ and POWER6 to be able to handle up to 6 counters rather than the 4 they previously handled. 
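A sketch of how the rules above translate into the new flags for one counter is shown below. The helper and its parameters are hypothetical (the real code derives this information from the counter's hw_event and context); it only illustrates the decisions the commit message describes.

```c
#include <asm/perf_counter.h>

static unsigned int compute_limited_flags(int per_task, int wants_interrupts,
					  int excludes_some_modes)
{
	unsigned int flags = 0;

	/*
	 * A per-task counter only counts while its task is running, so
	 * "cycles" and "cycles in run state" are equivalent for it.
	 */
	if (per_task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * Limited PMCs cannot generate interrupts and always count in all
	 * processor modes, so an alternative on a limited PMC is acceptable
	 * only when neither property is required by this counter.
	 */
	if (!wants_interrupts && !excludes_some_modes)
		flags |= PPMU_LIMITED_PMC_OK;

	return flags;
}
```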
Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Robert Richter LKML-Reference: <18936.19035.163066.892208@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 9d7ff6d7fb5..56d66c38143 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -12,6 +12,7 @@ #define MAX_HWCOUNTERS 8 #define MAX_EVENT_ALTERNATIVES 8 +#define MAX_LIMITED_HWCOUNTERS 2 /* * This struct provides the constants and functions needed to @@ -25,14 +26,24 @@ struct power_pmu { int (*compute_mmcr)(unsigned int events[], int n_ev, unsigned int hwc[], u64 mmcr[]); int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); - int (*get_alternatives)(unsigned int event, unsigned int alt[]); + int (*get_alternatives)(unsigned int event, unsigned int flags, + unsigned int alt[]); void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); + int (*limited_pmc_event)(unsigned int event); + int limited_pmc5_6; /* PMC5 and PMC6 have limited function */ int n_generic; int *generic_events; }; extern struct power_pmu *ppmu; +/* + * Values for flags to get_alternatives() + */ +#define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ +#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ +#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ + /* * The power_pmu.get_constraint function returns a 64-bit value and * a 64-bit mask that express the constraints between this event and -- cgit v1.2.3 From ef923214a4816c289e4af2d67a9ebb1a31e4ac61 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 14 May 2009 13:29:14 +1000 Subject: perf_counter: powerpc: use u64 for event codes internally Although the perf_counter API allows 63-bit raw event codes, internally in the powerpc back-end we had been using 32-bit event codes. This expands them to 64 bits so that we can add bits for specifying threshold start/stop events and instruction sampling modes later. This also corrects the return value of can_go_on_limited_pmc; we were returning an event code rather than just a 0/1 value in some circumstances. That didn't particularly matter while event codes were 32-bit, but now that event codes are 64-bit it might, so this fixes it. 
[ Impact: extend PowerPC perfcounter interfaces from u32 to u64 ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <18955.36874.472452.353104@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 56d66c38143..ceea76a48e3 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -23,13 +23,13 @@ struct power_pmu { int max_alternatives; u64 add_fields; u64 test_adder; - int (*compute_mmcr)(unsigned int events[], int n_ev, + int (*compute_mmcr)(u64 events[], int n_ev, unsigned int hwc[], u64 mmcr[]); - int (*get_constraint)(unsigned int event, u64 *mskp, u64 *valp); - int (*get_alternatives)(unsigned int event, unsigned int flags, - unsigned int alt[]); + int (*get_constraint)(u64 event, u64 *mskp, u64 *valp); + int (*get_alternatives)(u64 event, unsigned int flags, + u64 alt[]); void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); - int (*limited_pmc_event)(unsigned int event); + int (*limited_pmc_event)(u64 event); int limited_pmc5_6; /* PMC5 and PMC6 have limited function */ int n_generic; int *generic_events; -- cgit v1.2.3 From 0bbd0d4be8d5d3676c126e06e3c75c16def00441 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 14 May 2009 13:31:48 +1000 Subject: perf_counter: powerpc: supply more precise information on counter overflow events This uses values from the MMCRA, SIAR and SDAR registers on powerpc to supply more precise information for overflow events, including a data address when PERF_RECORD_ADDR is specified. Since POWER6 uses different bit positions in MMCRA from earlier processors, this converts the struct power_pmu limited_pmc5_6 field, which only had 0/1 values, into a flags field and defines bit values for its previous use (PPMU_LIMITED_PMC5_6) and a new flag (PPMU_ALT_SIPR) to indicate that the processor uses the POWER6 bit positions rather than the earlier positions. It also adds definitions in reg.h for the new and old positions of the bit that indicates that the SIAR and SDAR values come from the same instruction. For the data address, the SDAR value is supplied if we are not doing instruction sampling. In that case there is no guarantee that the address given in the PERF_RECORD_ADDR subrecord will correspond to the instruction whose address is given in the PERF_RECORD_IP subrecord. If instruction sampling is enabled (e.g. because this counter is counting a marked instruction event), then we only supply the SDAR value for the PERF_RECORD_ADDR subrecord if it corresponds to the instruction whose address is in the PERF_RECORD_IP subrecord. Otherwise we supply 0. 
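For illustration, here is a simplified sketch of the user/kernel/hypervisor classification this enables, keyed off PPMU_ALT_SIPR and the MMCRA bit definitions added below. The function name is an assumption, and the real perf_misc_flags() also handles the case where the sample did not come from a PMU interrupt and obtains the MMCRA value through the interrupt machinery.

```c
#include <linux/perf_counter.h>
#include <asm/perf_counter.h>
#include <asm/reg.h>

static unsigned long classify_sample(unsigned long mmcra)
{
	unsigned long sipr = MMCRA_SIPR, sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_ALT_SIPR) {	/* POWER6 bit positions */
		sipr = POWER6_MMCRA_SIPR;
		sihv = POWER6_MMCRA_SIHV;
	}

	if (mmcra & sihv)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & sipr) ? PERF_EVENT_MISC_USER
			      : PERF_EVENT_MISC_KERNEL;
}
```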
[ Impact: support more PMU hardware features on PowerPC ] Signed-off-by: Paul Mackerras Acked-by: Peter Zijlstra Cc: Corey Ashford Cc: Arnaldo Carvalho de Melo LKML-Reference: <18955.37028.48861.555309@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 14 +++++++++++++- arch/powerpc/include/asm/reg.h | 2 ++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index ceea76a48e3..1c60f0ca792 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -30,13 +30,19 @@ struct power_pmu { u64 alt[]); void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); int (*limited_pmc_event)(u64 event); - int limited_pmc5_6; /* PMC5 and PMC6 have limited function */ + u32 flags; int n_generic; int *generic_events; }; extern struct power_pmu *ppmu; +/* + * Values for power_pmu.flags + */ +#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ +#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ + /* * Values for flags to get_alternatives() */ @@ -44,6 +50,12 @@ extern struct power_pmu *ppmu; #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ +struct pt_regs; +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) + +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); + /* * The power_pmu.get_constraint function returns a 64-bit value and * a 64-bit mask that express the constraints between this event and diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e8018d540e8..fb359b0a693 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -492,11 +492,13 @@ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 #define SPRN_MMCRA 0x312 +#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */ #define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */ #define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */ #define MMCRA_SLOT_SHIFT 24 #define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */ +#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */ #define POWER6_MMCRA_SIHV 0x0000040000000000ULL #define POWER6_MMCRA_SIPR 0x0000020000000000ULL #define POWER6_MMCRA_THRM 0x00000020UL -- cgit v1.2.3 From 89d93347d1f66832c43e6b25a669fddff89929b5 Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Mon, 20 Apr 2009 11:26:48 -0500 Subject: powerpc: make dma_window_* in pci_controller struct avail on 32b Also, convert them to resource_size_t (which is unsigned long on 64-bit, so it's not a change there). We will be using these on fsl 32b to indicate the start and size address of memory that the pci controller can actually reach - this is needed to determine if an address requires bounce buffering. For now, initialize them to a standard value; in the near future, the value will be calculated based on how the inbound windows are programmed. 
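As an example of the kind of check these fields are meant to enable on 32-bit, a hypothetical helper might look like the following (the actual bounce-buffering decision lives in the DMA/swiotlb code, not here):

```c
#include <linux/types.h>
#include <asm/pci-bridge.h>

/* Does [addr, addr + size) fall outside what this controller can reach? */
static bool pci_addr_needs_bounce(struct pci_controller *hose,
				  dma_addr_t addr, size_t size)
{
	resource_size_t start = hose->dma_window_base_cur;
	resource_size_t end = start + hose->dma_window_size;

	return addr < start || addr + size > end;
}
```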
Signed-off-by: Becky Bruce Acked-by: Ben Herrenschmidt Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/pci-bridge.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 84007afabdb..9861258f6a4 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -140,10 +140,12 @@ struct pci_controller { struct resource io_resource; struct resource mem_resources[3]; int global_number; /* PCI domain number */ + + resource_size_t dma_window_base_cur; + resource_size_t dma_window_size; + #ifdef CONFIG_PPC64 unsigned long buid; - unsigned long dma_window_base_cur; - unsigned long dma_window_size; void *private_data; #endif /* CONFIG_PPC64 */ -- cgit v1.2.3 From ca851c783ccf9784fb6ffebcb25e2b6fd0b8dccc Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 22 Apr 2009 13:44:24 -0500 Subject: powerpc/cpm: Remove some cruft code and defines Kill of some old defines and macros that we no longer use like CPM_MAP_ADDR and CPM_IRQ_OFFSET. Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/cpm2.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h index 0f5e8ff59a8..990ff191da8 100644 --- a/arch/powerpc/include/asm/cpm2.h +++ b/arch/powerpc/include/asm/cpm2.h @@ -14,10 +14,6 @@ #include #include -#ifdef CONFIG_PPC_85xx -#define CPM_MAP_ADDR (get_immrbase() + 0x80000) -#endif - /* CPM Command register. */ #define CPM_CR_RST ((uint)0x80000000) -- cgit v1.2.3 From 1e76dff22ce45bc8b869a9956b24a77877915364 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 22 Apr 2009 12:32:09 -0500 Subject: powerpc/86xx: clean up smp init code Removed the need for asm/mpc86xx.h as it was only used in mpc86xx_smp.c and just moved the defines it cared about into there. Also fixed up the ioremap to only map the one 4k page we need access to and to iounmap when we are done. Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/mpc86xx.h | 33 --------------------------------- 1 file changed, 33 deletions(-) delete mode 100644 arch/powerpc/include/asm/mpc86xx.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mpc86xx.h b/arch/powerpc/include/asm/mpc86xx.h deleted file mode 100644 index 15f650f987e..00000000000 --- a/arch/powerpc/include/asm/mpc86xx.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * MPC86xx definitions - * - * Author: Jeff Brown - * - * Copyright 2004 Freescale Semiconductor, Inc - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifdef __KERNEL__ -#ifndef __ASM_POWERPC_MPC86xx_H__ -#define __ASM_POWERPC_MPC86xx_H__ - -#include - -#ifdef CONFIG_PPC_86xx - -#define CPU0_BOOT_RELEASE 0x01000000 -#define CPU1_BOOT_RELEASE 0x02000000 -#define CPU_ALL_RELEASED (CPU0_BOOT_RELEASE | CPU1_BOOT_RELEASE) -#define MCM_PORT_CONFIG_OFFSET 0x1010 - -/* Offset from CCSRBAR */ -#define MPC86xx_MCM_OFFSET (0x00000) -#define MPC86xx_MCM_SIZE (0x02000) - -#endif /* CONFIG_PPC_86xx */ -#endif /* __ASM_POWERPC_MPC86xx_H__ */ -#endif /* __KERNEL__ */ -- cgit v1.2.3 From 06c4435021f4856261edd01e2691071edeb8fa51 Mon Sep 17 00:00:00 2001 From: Haiying Wang Date: Fri, 1 May 2009 15:40:47 -0400 Subject: powerpc/qe: update risc allocation for QE Change the RISC allocation to macros instead of enum, add function to read the number of risc engines from the new property "fsl,qe-num-riscs" under the qe node in dts. Add new property "fsl,qe-num-riscs" description in qe.txt Signed-off-by: Haiying Wang Acked-by: Timur Tabi Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/qe.h | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index 2701753d993..60314ef58d1 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -152,6 +152,8 @@ unsigned int qe_get_brg_clk(void); int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); int qe_get_snum(void); void qe_put_snum(u8 snum); +unsigned int qe_get_num_of_risc(void); + /* we actually use cpm_muram implementation, define this for convenience */ #define qe_muram_init cpm_muram_init #define qe_muram_alloc cpm_muram_alloc @@ -231,12 +233,16 @@ struct qe_bd { #define QE_ALIGNMENT_OF_PRAM 64 /* RISC allocation */ -enum qe_risc_allocation { - QE_RISC_ALLOCATION_RISC1 = 1, /* RISC 1 */ - QE_RISC_ALLOCATION_RISC2 = 2, /* RISC 2 */ - QE_RISC_ALLOCATION_RISC1_AND_RISC2 = 3 /* Dynamically choose - RISC 1 or RISC 2 */ -}; +#define QE_RISC_ALLOCATION_RISC1 0x1 /* RISC 1 */ +#define QE_RISC_ALLOCATION_RISC2 0x2 /* RISC 2 */ +#define QE_RISC_ALLOCATION_RISC3 0x4 /* RISC 3 */ +#define QE_RISC_ALLOCATION_RISC4 0x8 /* RISC 4 */ +#define QE_RISC_ALLOCATION_RISC1_AND_RISC2 (QE_RISC_ALLOCATION_RISC1 | \ + QE_RISC_ALLOCATION_RISC2) +#define QE_RISC_ALLOCATION_FOUR_RISCS (QE_RISC_ALLOCATION_RISC1 | \ + QE_RISC_ALLOCATION_RISC2 | \ + QE_RISC_ALLOCATION_RISC3 | \ + QE_RISC_ALLOCATION_RISC4) /* QE extended filtering Table Lookup Key Size */ enum qe_fltr_tbl_lookup_key_size { -- cgit v1.2.3 From 98ca77af23da6682bb3e34961a3f32e2c064a4ce Mon Sep 17 00:00:00 2001 From: Haiying Wang Date: Fri, 1 May 2009 15:40:48 -0400 Subject: powerpc/qe: update QE Serial Number The latest QE chip may have more Serial Number(SNUM)s of thread to use. We will get the number of SNUMs from device tree by reading the new property "fsl,qe-num-snums", and set 28 as the default number of SNUMs so that it is compatible with the old QE chips' device trees which don't have this new property. The macro QE_NUM_OF_SNUM is defined as the maximum number in QE snum table which is 256. Also we update the snum_init[] array with 18 more new SNUMs which are confirmed to be useful on new chip. 
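A sketch of the driver-side property read, with the backward-compatible default the commit message describes, could look like this (the qe node lookup and error handling are simplified, and the helper name is an assumption):

```c
#include <linux/of.h>

#define QE_DEFAULT_NUM_OF_SNUMS	28	/* fallback for older device trees */

static unsigned int qe_read_num_of_snums(struct device_node *qe)
{
	const u32 *prop;
	unsigned int num_of_snums = QE_DEFAULT_NUM_OF_SNUMS;

	prop = of_get_property(qe, "fsl,qe-num-snums", NULL);
	if (prop)
		num_of_snums = *prop;

	return num_of_snums;
}
```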
Signed-off-by: Haiying Wang Acked-by: Timur Tabi Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/qe.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index 60314ef58d1..e0faf332c9c 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -22,7 +22,7 @@ #include #include -#define QE_NUM_OF_SNUM 28 +#define QE_NUM_OF_SNUM 256 /* There are 256 serial number in QE */ #define QE_NUM_OF_BRGS 16 #define QE_NUM_OF_PORTS 1024 @@ -153,6 +153,7 @@ int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier); int qe_get_snum(void); void qe_put_snum(u8 snum); unsigned int qe_get_num_of_risc(void); +unsigned int qe_get_num_of_snums(void); /* we actually use cpm_muram implementation, define this for convenience */ #define qe_muram_init cpm_muram_init -- cgit v1.2.3 From 14f966e79445015cd89d0fa0ceb6b33702e951b6 Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Wed, 15 Apr 2009 05:55:32 +0000 Subject: powerpc/pseries: CMO unused page hinting Adds support for the "unused" page hint which can be used in shared memory partitions to flag pages not in use, which will then be stolen before active pages by the hypervisor when memory needs to be moved to LPARs in need of additional memory. Failure to mark pages as 'unused' makes the LPAR slower to give up unused memory to other partitions. This adds the kernel parameter 'cmo_free_hint' to disable this functionality. Signed-off-by: Brian King Signed-off-by: Robert Jennings Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/page.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 32cbf16f10e..4940662ee87 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -231,6 +231,11 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); extern int page_is_ram(unsigned long pfn); +#ifdef CONFIG_PPC_SMLPAR +void arch_free_page(struct page *page, int order); +#define HAVE_ARCH_FREE_PAGE +#endif + struct vm_area_struct; typedef struct page *pgtable_t; -- cgit v1.2.3 From 9b647a30cbc228259555d6b0b6bc7d9ec798f907 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 22 Apr 2009 15:31:38 +0000 Subject: powerpc/irq: Move get_irq() comment into header The guts of do_IRQ() isn't really the right place to be documenting the ppc_md.get_irq() interface. So move the comment into machdep.h Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/machdep.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 0efdb1dfdc5..11d1fc3a896 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -110,6 +110,10 @@ struct machdep_calls { void (*show_percpuinfo)(struct seq_file *m, int i); void (*init_IRQ)(void); + + /* Return an irq, or NO_IRQ to indicate there are none pending. + * If for some reason there is no irq, but the interrupt + * shouldn't be counted as spurious, return NO_IRQ_IGNORE. 
*/ unsigned int (*get_irq)(void); #ifdef CONFIG_KEXEC void (*kexec_cpu_down)(int crash_shutdown, int secondary); -- cgit v1.2.3 From d89ebca2248ab03a5928e7dd30b6c304283fc207 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 28 Apr 2009 03:32:36 +0000 Subject: powerpc: Fix up elf_read_implies_exec() usage We believe if a toolchain supports PT_GNU_STACK that it sets the proper PHDR permissions. Therefor elf_read_implies_exec() should only be true if we don't see PT_GNU_STACK set. Signed-off-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/elf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index d6b4a12cdef..014a624f4c8 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -256,11 +256,11 @@ do { \ * even if we have an executable stack. */ # define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ - (exec_stk != EXSTACK_DISABLE_X) : 0) + (exec_stk == EXSTACK_DEFAULT) : 0) #else # define SET_PERSONALITY(ex) \ set_personality(PER_LINUX | (current->personality & (~PER_MASK))) -# define elf_read_implies_exec(ex, exec_stk) (exec_stk != EXSTACK_DISABLE_X) +# define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT) #endif /* __powerpc64__ */ extern int dcache_bsize; -- cgit v1.2.3 From da6b43c833f885d8604aee7f9711bb457a40d863 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 29 Apr 2009 20:58:01 +0000 Subject: powerpc: Cleanup macros in ppc-opcode.h Make macros more braces happy. Signed-off-by: Michael Neuling Acked-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ppc-opcode.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 640ccbbc097..1c819501de2 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -47,10 +47,10 @@ #define PPC_INST_WAIT 0x7c00007c /* macros to insert fields into opcodes */ -#define __PPC_RA(a) ((a & 0x1f) << 16) -#define __PPC_RB(b) ((b & 0x1f) << 11) -#define __PPC_T_TLB(t) ((t & 0x3) << 21) -#define __PPC_WC(w) ((w & 0x3) << 21) +#define __PPC_RA(a) (((a) & 0x1f) << 16) +#define __PPC_RB(b) (((b) & 0x1f) << 11) +#define __PPC_T_TLB(t) (((t) & 0x3) << 21) +#define __PPC_WC(w) (((w) & 0x3) << 21) /* Deal with instructions that older assemblers aren't aware of */ #define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \ -- cgit v1.2.3 From dfb432cb960bfcbdf668d0a228bc909897156b31 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 29 Apr 2009 20:58:01 +0000 Subject: powerpc: Move VSX load/stores into ppc-opcode.h Cleans up the VSX load/store instructions by moving them into ppc-opcode.h. 
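The XX1 form deserves a small worked example: VSX has 64 registers, so the 6-bit source field is split and its high bit lands in the low bit of the instruction word, which is what the (((s) & 0x20) >> 5) term provides. The standalone program below is not kernel code; it simply mirrors the macro definitions from the diff that follows and prints the opcode produced for a register above 31.

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors of the macros added in this patch (values taken from the diff). */
#define PPC_INST_STXVD2X	0x7c000798
#define __PPC_RA(a)		(((a) & 0x1f) << 16)
#define __PPC_RB(b)		(((b) & 0x1f) << 11)
#define __PPC_XS(s)		((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define VSX_XX1(s, a, b)	(__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))

int main(void)
{
	/* Encodes stxvd2x vs34,r3,r4 according to these macros. */
	uint32_t insn = PPC_INST_STXVD2X | VSX_XX1(34, 3, 4);

	printf("stxvd2x vs34,r3,r4 = 0x%08x\n", insn);
	return 0;
}
```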
Signed-off-by: Michael Neuling Acked-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ppc-opcode.h | 13 +++++++++++++ arch/powerpc/include/asm/ppc_asm.h | 10 ---------- 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 1c819501de2..39bcc9f9ff6 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -25,6 +25,7 @@ #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWSYNC 0x7c2004ac +#define PPC_INST_LXVD2X 0x7c000698 #define PPC_INST_MCRXR 0x7c000400 #define PPC_INST_MCRXR_MASK 0xfc0007fe #define PPC_INST_MFSPR_PVR 0x7c1f42a6 @@ -43,12 +44,14 @@ #define PPC_INST_STSWI 0x7c0005aa #define PPC_INST_STSWX 0x7c00052a +#define PPC_INST_STXVD2X 0x7c000798 #define PPC_INST_TLBILX 0x7c000024 #define PPC_INST_WAIT 0x7c00007c /* macros to insert fields into opcodes */ #define __PPC_RA(a) (((a) & 0x1f) << 16) #define __PPC_RB(b) (((b) & 0x1f) << 11) +#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) @@ -70,4 +73,14 @@ #define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \ __PPC_WC(w)) +/* + * Define what the VSX XX1 form instructions will look like, then add + * the 128 bit load store instructions based on that. + */ +#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b)) +#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \ + VSX_XX1((s), (a), (b))) +#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \ + VSX_XX1((s), (a), (b))) + #endif /* _ASM_POWERPC_PPC_OPCODE_H */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 384d90c9c27..f9729529c20 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -76,16 +76,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ REST_10GPRS(22, base) #endif -/* - * Define what the VSX XX1 form instructions will look like, then add - * the 128 bit load store instructions based on that. - */ -#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \ - ((rb) << 11) | (((xs) >> 5))) - -#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb))) -#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb))) - #define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base) #define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base) #define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base) -- cgit v1.2.3 From af20aeb1a3292ae7ecfc492a4dc059e35465e016 Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Wed, 29 Apr 2009 20:58:01 +0000 Subject: powerpc: Enable MMU feature sections for inline asm powerpc: Enable MMU feature sections for inline asm This adds the ability to do MMU feature sections for inline asm. Signed-off-by: Milton Miller Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/feature-fixups.h | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index e4094a5cb05..cbd4dfa4bce 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -8,8 +8,6 @@ * 2 of the License, or (at your option) any later version. 
*/ -#ifdef __ASSEMBLY__ - /* * Feature section common macros * @@ -23,10 +21,12 @@ /* 64 bits kernel, 32 bits code (ie. vdso32) */ #define FTR_ENTRY_LONG .llong #define FTR_ENTRY_OFFSET .long 0xffffffff; .long +#elif defined(CONFIG_PPC64) +#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_OFFSET .llong #else -/* 64 bit kernel 64 bit code, or 32 bit kernel 32 bit code */ -#define FTR_ENTRY_LONG PPC_LONG -#define FTR_ENTRY_OFFSET PPC_LONG +#define FTR_ENTRY_LONG .long +#define FTR_ENTRY_OFFSET .long #endif #define START_FTR_SECTION(label) label##1: @@ -141,6 +141,21 @@ label##5: \ #define ALT_FW_FTR_SECTION_END_IFCLR(msk) \ ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97) +#ifndef __ASSEMBLY__ + +#define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ + stringify_in_c(BEGIN_MMU_FTR_SECTION) \ + section_if "; " \ + stringify_in_c(MMU_FTR_SECTION_ELSE) \ + section_else "; " \ + stringify_in_c(ALT_MMU_FTR_SECTION_END((msk), (val))) + +#define ASM_MMU_FTR_IFSET(section_if, section_else, msk) \ + ASM_MMU_FTR_IF(section_if, section_else, (msk), (msk)) + +#define ASM_MMU_FTR_IFCLR(section_if, section_else, msk) \ + ASM_MMU_FTR_IF(section_if, section_else, (msk), 0) + #endif /* __ASSEMBLY__ */ /* LWSYNC feature sections */ -- cgit v1.2.3 From 60dbf4385130136847ea73657da329f8e7dbe16e Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Wed, 29 Apr 2009 20:58:01 +0000 Subject: powerpc: Add 2.06 tlbie mnemonics This adds the PowerPC 2.06 tlbie mnemonics and keeps backwards compatibilty for CPUs before 2.06. Only useful for bare metal systems. Signed-off-by: Milton Miller Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mmu.h | 5 +++++ arch/powerpc/include/asm/ppc-opcode.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index cbf15438709..325b7208a14 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -52,6 +52,11 @@ */ #define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) +/* This indicates that the processor uses the ISA 2.06 server tlbie + * mnemonics + */ +#define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000) + #ifndef __ASSEMBLY__ #include diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 39bcc9f9ff6..b74f16d45cb 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -45,12 +45,14 @@ #define PPC_INST_STSWI 0x7c0005aa #define PPC_INST_STSWX 0x7c00052a #define PPC_INST_STXVD2X 0x7c000798 +#define PPC_INST_TLBIE 0x7c000264 #define PPC_INST_TLBILX 0x7c000024 #define PPC_INST_WAIT 0x7c00007c /* macros to insert fields into opcodes */ #define __PPC_RA(a) (((a) & 0x1f) << 16) #define __PPC_RB(b) (((b) & 0x1f) << 11) +#define __PPC_RS(s) (((s) & 0x1f) << 21) #define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5)) #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) @@ -72,6 +74,8 @@ #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) #define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \ __PPC_WC(w)) +#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \ + __PPC_RB(a) | __PPC_RS(lp)) /* * Define what the VSX XX1 form instructions will look like, then add -- cgit v1.2.3 From 2eb4afb69ff3cdd357d5e68ed82cd131916bd8a7 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Thu, 30 Apr 2009 09:26:21 +0000 Subject: powerpc/pci: Move pseries code into pseries platform specific area There doesn't appear 
to be any specific reason that we need to setup the pseries specific notifier in generic arch pci code. Move it into pseries land. Signed-off-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pci-bridge.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 9861258f6a4..0dbf66bdb01 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -223,6 +223,7 @@ struct pci_dn { #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) extern struct device_node *fetch_dev_dn(struct pci_dev *dev); +extern void * update_dn_pci_info(struct device_node *dn, void *data); /* Get a device_node from a pci_dev. This code must be fast except * in the case where the sysdata is incorrect and needs to be fixed -- cgit v1.2.3 From cf6692c07ab4cba8a28780b67706ac804319086b Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 5 May 2009 02:20:08 +0000 Subject: powerpc/pci: Cleanup some minor cruft * Removed building setup-irq on ppc32, we don't use it anymore * Remove duplicate prototype for setup_grackle() code that needs it gets it from * Removed gratuitous pci_io_size type differences between ppc32/ppc64 Signed-off-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/pci-bridge.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 0dbf66bdb01..4c61fa0b8d7 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -86,17 +86,12 @@ struct pci_controller { void *io_base_alloc; #endif resource_size_t io_base_phys; -#ifndef CONFIG_PPC64 resource_size_t pci_io_size; -#endif /* Some machines (PReP) have a non 1:1 mapping of * the PCI memory space in the CPU bus space */ resource_size_t pci_mem_offset; -#ifdef CONFIG_PPC64 - unsigned long pci_io_size; -#endif /* Some machines have a special region to forward the ISA * "memory" cycles such as VGA memory regions. Left to 0 @@ -187,7 +182,6 @@ extern int early_find_capability(struct pci_controller *hose, int bus, extern void setup_indirect_pci(struct pci_controller* hose, resource_size_t cfg_addr, resource_size_t cfg_data, u32 flags); -extern void setup_grackle(struct pci_controller *hose); #else /* CONFIG_PPC64 */ /* -- cgit v1.2.3 From 2138422bbab91c3e924c9836e394f4925b456b79 Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Thu, 14 May 2009 12:42:27 +0000 Subject: powerpc: Use sg->dma_length in sg_dma_len() macro on 32-bit Currently, the 32-bit code uses sg->length instead of sg->dma_lentgh to report sg_dma_len. However, since the default dma code for 32-bit (the dma_direct case) sets dma_length and length to the same thing, we should be able to use dma_length there as well. This gets rid of some 32-vs-64-bit ifdefs, and is needed by the swiotlb code which actually distinguishes between dma_length and length. 
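As a usage illustration (a hypothetical driver fragment, not part of this patch): after dma_map_sg() a driver must consume sg_dma_address()/sg_dma_len(), which is why dma_length has to stay accurate even when swiotlb adjusts a mapping.

```c
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void program_hw_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, mapped, i)
		pr_debug("seg %d: addr 0x%llx len %u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));
	/* hand the sg_dma_address()/sg_dma_len() pairs to the device here */
}
```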
Signed-off-by: Becky Bruce Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/scatterlist.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h index fcf7d55afe4..912bf597870 100644 --- a/arch/powerpc/include/asm/scatterlist.h +++ b/arch/powerpc/include/asm/scatterlist.h @@ -21,7 +21,7 @@ struct scatterlist { unsigned int offset; unsigned int length; - /* For TCE support */ + /* For TCE or SWIOTLB support */ dma_addr_t dma_address; u32 dma_length; }; @@ -34,11 +34,7 @@ struct scatterlist { * is 0. */ #define sg_dma_address(sg) ((sg)->dma_address) -#ifdef __powerpc64__ #define sg_dma_len(sg) ((sg)->dma_length) -#else -#define sg_dma_len(sg) ((sg)->length) -#endif #ifdef __powerpc64__ #define ISA_DMA_THRESHOLD (~0UL) -- cgit v1.2.3 From 80947e7c99c29ce3a78bdc1933b310468455a82f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 18 May 2009 02:10:05 +0000 Subject: powerpc: Keep track of emulated instructions If CONFIG_PPC_EMULATED_STATS is enabled, make available counters for the various classes of emulated instructions under /sys/kernel/debug/powerpc/emulated_instructions/ (assumed debugfs is mounted on /sys/kernel/debug). Optionally (controlled by /sys/kernel/debug/powerpc/emulated_instructions/do_warn), rate-limited warnings can be printed to the console when instructions are emulated. Signed-off-by: Geert Uytterhoeven Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/emulated_ops.h | 73 +++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 arch/powerpc/include/asm/emulated_ops.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h new file mode 100644 index 00000000000..9154e852673 --- /dev/null +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -0,0 +1,73 @@ +/* + * Copyright 2007 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * If not, see . 
+ */ + +#ifndef _ASM_POWERPC_EMULATED_OPS_H +#define _ASM_POWERPC_EMULATED_OPS_H + +#include + + +#ifdef CONFIG_PPC_EMULATED_STATS + +struct ppc_emulated_entry { + const char *name; + atomic_t val; +}; + +extern struct ppc_emulated { +#ifdef CONFIG_ALTIVEC + struct ppc_emulated_entry altivec; +#endif + struct ppc_emulated_entry dcba; + struct ppc_emulated_entry dcbz; + struct ppc_emulated_entry fp_pair; + struct ppc_emulated_entry isel; + struct ppc_emulated_entry mcrxr; + struct ppc_emulated_entry mfpvr; + struct ppc_emulated_entry multiple; + struct ppc_emulated_entry popcntb; + struct ppc_emulated_entry spe; + struct ppc_emulated_entry string; + struct ppc_emulated_entry unaligned; +#ifdef CONFIG_MATH_EMULATION + struct ppc_emulated_entry math; +#elif defined(CONFIG_8XX_MINIMAL_FPEMU) + struct ppc_emulated_entry 8xx; +#endif +#ifdef CONFIG_VSX + struct ppc_emulated_entry vsx; +#endif +} ppc_emulated; + +extern u32 ppc_warn_emulated; + +extern void ppc_warn_emulated_print(const char *type); + +#define PPC_WARN_EMULATED(type) \ + do { \ + atomic_inc(&ppc_emulated.type.val); \ + if (ppc_warn_emulated) \ + ppc_warn_emulated_print(ppc_emulated.type.name); \ + } while (0) + +#else /* !CONFIG_PPC_EMULATED_STATS */ + +#define PPC_WARN_EMULATED(type) do { } while (0) + +#endif /* !CONFIG_PPC_EMULATED_STATS */ + +#endif /* _ASM_POWERPC_EMULATED_OPS_H */ -- cgit v1.2.3 From 0bc53a67ac831ec84f730a657dbcadd80a589ef5 Mon Sep 17 00:00:00 2001 From: Jon Smirl Date: Sat, 23 May 2009 19:13:03 -0400 Subject: ASoC: Add a few more mpc5200 PSC defines Add a few more mpc5200 PSC defines. More bit fields defines for mpc5200 PSC registers. Signed-off-by: Jon Smirl Acked-by: Grant Likely Signed-off-by: Mark Brown --- arch/powerpc/include/asm/mpc52xx_psc.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h index a218da6bec7..fb841205745 100644 --- a/arch/powerpc/include/asm/mpc52xx_psc.h +++ b/arch/powerpc/include/asm/mpc52xx_psc.h @@ -28,6 +28,10 @@ #define MPC52xx_PSC_MAXNUM 6 /* Programmable Serial Controller (PSC) status register bits */ +#define MPC52xx_PSC_SR_UNEX_RX 0x0001 +#define MPC52xx_PSC_SR_DATA_VAL 0x0002 +#define MPC52xx_PSC_SR_DATA_OVR 0x0004 +#define MPC52xx_PSC_SR_CMDSEND 0x0008 #define MPC52xx_PSC_SR_CDE 0x0080 #define MPC52xx_PSC_SR_RXRDY 0x0100 #define MPC52xx_PSC_SR_RXFULL 0x0200 @@ -61,6 +65,12 @@ #define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001 /* PSC interrupt status/mask bits */ +#define MPC52xx_PSC_IMR_UNEX_RX_SLOT 0x0001 +#define MPC52xx_PSC_IMR_DATA_VALID 0x0002 +#define MPC52xx_PSC_IMR_DATA_OVR 0x0004 +#define MPC52xx_PSC_IMR_CMD_SEND 0x0008 +#define MPC52xx_PSC_IMR_ERROR 0x0040 +#define MPC52xx_PSC_IMR_DEOF 0x0080 #define MPC52xx_PSC_IMR_TXRDY 0x0100 #define MPC52xx_PSC_IMR_RXRDY 0x0200 #define MPC52xx_PSC_IMR_DB 0x0400 @@ -117,6 +127,7 @@ #define MPC52xx_PSC_SICR_SIM_FIR (0x6 << 24) #define MPC52xx_PSC_SICR_SIM_CODEC_24 (0x7 << 24) #define MPC52xx_PSC_SICR_SIM_CODEC_32 (0xf << 24) +#define MPC52xx_PSC_SICR_AWR (1 << 30) #define MPC52xx_PSC_SICR_GENCLK (1 << 23) #define MPC52xx_PSC_SICR_I2S (1 << 22) #define MPC52xx_PSC_SICR_CLKPOL (1 << 21) -- cgit v1.2.3 From 047584ce94108012288554a5f84585d792cc7f8f Mon Sep 17 00:00:00 2001 From: Haiying Wang Date: Tue, 2 Jun 2009 04:04:15 +0000 Subject: net/ucc_geth: Add SGMII support for UEC GETH driver Signed-off-by: Haiying Wang Signed-off-by: David S. 
Miller --- arch/powerpc/include/asm/qe.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h index 2701753d993..4459d20dc76 100644 --- a/arch/powerpc/include/asm/qe.h +++ b/arch/powerpc/include/asm/qe.h @@ -668,6 +668,8 @@ struct ucc_slow_pram { #define UCC_GETH_UPSMR_RMM 0x00001000 #define UCC_GETH_UPSMR_CAM 0x00000400 #define UCC_GETH_UPSMR_BRO 0x00000200 +#define UCC_GETH_UPSMR_SMM 0x00000080 +#define UCC_GETH_UPSMR_SGMM 0x00000020 /* UCC Transmit On Demand Register (UTODR) */ #define UCC_SLOW_TOD 0x8000 -- cgit v1.2.3 From 64f16502475ddf663169369fffff6da9b10ea9fb Mon Sep 17 00:00:00 2001 From: Roderick Colenbrander Date: Sat, 6 Jun 2009 10:14:22 -0600 Subject: powerpc/virtex: Add support for Xilinx PCI host bridge This patch adds support for the Xilinx plbv46-pci-1.03.a PCI host bridge IPcore. Signed-off-by: Roderick Colenbrander Signed-off-by: Grant Likely --- arch/powerpc/include/asm/xilinx_pci.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 arch/powerpc/include/asm/xilinx_pci.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h new file mode 100644 index 00000000000..7a8275caf6a --- /dev/null +++ b/arch/powerpc/include/asm/xilinx_pci.h @@ -0,0 +1,21 @@ +/* + * Xilinx pci external definitions + * + * Copyright 2009 Roderick Colenbrander + * Copyright 2009 Secret Lab Technologies Ltd. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef INCLUDE_XILINX_PCI +#define INCLUDE_XILINX_PCI + +#ifdef CONFIG_XILINX_PCI +extern void __init xilinx_pci_init(void); +#else +static inline void __init xilinx_pci_init(void) { return; } +#endif + +#endif /* INCLUDE_XILINX_PCI */ -- cgit v1.2.3 From ec097c84dff17511f2693e6ef6c3064dfbf0a3af Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Thu, 28 May 2009 21:26:38 +0000 Subject: powerpc: Add PTRACE_SINGLEBLOCK support Reworked by: Benjamin Herrenschmidt This adds block-step support on powerpc, including a PTRACE_SINGLEBLOCK request for ptrace. The BookE implementation is tweaked to fire a single step after a block step in order to mimmic the server behaviour. Signed-off-by: Roland McGrath Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ptrace.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index c9c678fb253..8c341490cfc 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -135,7 +135,9 @@ do { \ * These are defined as per linux/ptrace.h, which see. 
*/ #define arch_has_single_step() (1) +#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601)) extern void user_enable_single_step(struct task_struct *); +extern void user_enable_block_step(struct task_struct *); extern void user_disable_single_step(struct task_struct *); #endif /* __ASSEMBLY__ */ @@ -288,4 +290,6 @@ extern void user_disable_single_step(struct task_struct *); #define PPC_PTRACE_PEEKUSR_3264 0x91 #define PPC_PTRACE_POKEUSR_3264 0x90 +#define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */ + #endif /* _ASM_POWERPC_PTRACE_H */ -- cgit v1.2.3 From d3f6204a7d65030ba92bf43a278b3f3054353e0b Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 2 Jun 2009 21:16:38 +0000 Subject: powerpc: Set init_bootmem_done on NUMA platforms as well For some obscure reason, we only set init_bootmem_done after initializing bootmem when NUMA isn't enabled. We even document this next to the declaration of that global in system.h which of course I didn't read before I had to debug why some WIP code wasn't working properly... This patch changes it so that we always set it after bootmem is initialized which should have always been the case... go figure ! Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/system.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index 2b2420a4988..bb8e006a47c 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -211,7 +211,7 @@ extern struct task_struct *_switch(struct thread_struct *prev, extern unsigned int rtas_data; extern int mem_init_done; /* set on boot once kmalloc can be called */ -extern int init_bootmem_done; /* set on !NUMA once bootmem is available */ +extern int init_bootmem_done; /* set once bootmem is available */ extern phys_addr_t memory_limit; extern unsigned long klimit; -- cgit v1.2.3 From 91c60b5b8209627590b31c07262e40c27d27d272 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 2 Jun 2009 21:17:41 +0000 Subject: powerpc: Separate PACA fields for server CPUs This patch has no effect other than re-ordering PACA fields on current server CPUs. It however is a pre-requisite for future support of BookE 64-bit processors. Various parts of the PACA struct are now moved under some ifdef's, either the new CONFIG_PPC_BOOK3S or CONFIG_PPC_STD_MMU_64, whatever seems more appropriate. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/paca.h | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 082b3aedf14..0ea1985bdb8 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -43,6 +43,7 @@ struct task_struct; * processor. 
*/ struct paca_struct { +#ifdef CONFIG_PPC_BOOK3S /* * Because hw_cpu_id, unlike other paca fields, is accessed * routinely from other CPUs (from the IRQ code), we stick to @@ -51,7 +52,7 @@ struct paca_struct { */ struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */ - +#endif /* CONFIG_PPC_BOOK3S */ /* * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c * load lock_token and paca_index with a single lwz @@ -64,13 +65,16 @@ struct paca_struct { u64 kernel_toc; /* Kernel TOC address */ u64 kernelbase; /* Base address of kernel */ u64 kernel_msr; /* MSR while running in kernel */ +#ifdef CONFIG_PPC_STD_MMU_64 u64 stab_real; /* Absolute address of segment table */ u64 stab_addr; /* Virtual address of segment table */ +#endif /* CONFIG_PPC_STD_MMU_64 */ void *emergency_sp; /* pointer to emergency stack */ u64 data_offset; /* per cpu data offset */ s16 hw_cpu_id; /* Physical processor number */ u8 cpu_start; /* At startup, processor spins until */ /* this becomes non-zero. */ +#ifdef CONFIG_PPC_STD_MMU_64 struct slb_shadow *slb_shadow_ptr; /* @@ -81,11 +85,13 @@ struct paca_struct { u64 exmc[10]; /* used for machine checks */ u64 exslb[10]; /* used for SLB/segment table misses * on the linear mapping */ - - mm_context_t context; + /* SLB related definitions */ u16 vmalloc_sllp; u16 slb_cache_ptr; u16 slb_cache[SLB_CACHE_ENTRIES]; +#endif /* CONFIG_PPC_STD_MMU_64 */ + + mm_context_t context; /* * then miscellaneous read-write fields -- cgit v1.2.3 From 944916858a430a0627e483657d4cfa2cd2dfb4f7 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 2 Jun 2009 21:17:45 +0000 Subject: powerpc: Shield code specific to 64-bit server processors This is a random collection of added ifdef's around portions of code that only mak sense on server processors. Using either CONFIG_PPC_STD_MMU_64 or CONFIG_PPC_BOOK3S as seems appropriate. This is meant to make the future merging of Book3E 64-bit support easier. 
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/lppaca.h | 6 ++++++ arch/powerpc/include/asm/mmu.h | 4 ++-- arch/powerpc/include/asm/pgtable-ppc64.h | 5 +++++ 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index d2a65e8ca6a..f78f65c38f0 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -20,6 +20,11 @@ #define _ASM_POWERPC_LPPACA_H #ifdef __KERNEL__ +/* These definitions relate to hypervisors that only exist when using + * a server type processor + */ +#ifdef CONFIG_PPC_BOOK3S + //============================================================================= // // This control block contains the data that is shared between the @@ -158,5 +163,6 @@ struct slb_shadow { extern struct slb_shadow slb_shadow[]; +#endif /* CONFIG_PPC_BOOK3S */ #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_LPPACA_H */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 325b7208a14..fb57ded592f 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(void); #endif /* !__ASSEMBLY__ */ -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC_STD_MMU_64) /* 64-bit classic hash table MMU */ # include -#elif defined(CONFIG_PPC_STD_MMU) +#elif defined(CONFIG_PPC_STD_MMU_32) /* 32-bit classic hash table MMU */ # include #elif defined(CONFIG_40x) diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index c40db05f21e..8cd083c6150 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -31,9 +31,11 @@ #error TASK_SIZE_USER64 exceeds pagetable range #endif +#ifdef CONFIG_PPC_STD_MMU_64 #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) #error TASK_SIZE_USER64 exceeds user VSID range #endif +#endif /* * Define the address range of the vmalloc VM area. @@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm, if (!huge) assert_pte_locked(mm, addr); +#ifdef CONFIG_PPC_STD_MMU_64 if (old & _PAGE_HASHPTE) hpte_need_flush(mm, addr, ptep, old, huge); +#endif + return old; } -- cgit v1.2.3 From ec3cf2ece22a8ede7478bf38e2a818986322662b Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Thu, 14 May 2009 12:42:28 +0000 Subject: powerpc: Add support for swiotlb on 32-bit This patch includes the basic infrastructure to use swiotlb bounce buffering on 32-bit powerpc. It is not yet enabled on any platforms. Probably the most interesting bit is the addition of addr_needs_map to dma_ops - we need this as a dma_op because the decision of whether or not an addr can be mapped by a device is device-specific. 
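To illustrate why this has to be a per-device decision, a hypothetical addr_needs_map implementation could simply check the buffer against the device's DMA mask and ask for bouncing of anything the device cannot reach. This is an editorial sketch with an invented name, not code from this patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Editorial sketch only: report that an address needs bounce buffering
 * when the end of the buffer lies beyond what the device's DMA mask
 * can address.
 */
static int example_addr_needs_map(struct device *dev, dma_addr_t addr,
				  size_t size)
{
	return (addr + size - 1) > dma_get_mask(dev);
}
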
Signed-off-by: Becky Bruce Acked-by: Kumar Gala Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/dma-mapping.h | 11 +++++++++++ arch/powerpc/include/asm/swiotlb.h | 27 +++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 arch/powerpc/include/asm/swiotlb.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index cb448d68452..3d9e887c3c0 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -15,9 +15,18 @@ #include #include #include +#include #define DMA_ERROR_CODE (~(dma_addr_t)0x0) +/* Some dma direct funcs must be visible for use in other dma_ops */ +extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); +extern void dma_direct_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + +extern unsigned long get_dma_direct_offset(struct device *dev); + #ifdef CONFIG_NOT_COHERENT_CACHE /* * DMA-consistent mapping functions for PowerPCs that don't support @@ -78,6 +87,8 @@ struct dma_mapping_ops { dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs); + int (*addr_needs_map)(struct device *dev, dma_addr_t addr, + size_t size); #ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS void (*sync_single_range_for_cpu)(struct device *hwdev, dma_addr_t dma_handle, unsigned long offset, diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h new file mode 100644 index 00000000000..30891d6e2bc --- /dev/null +++ b/arch/powerpc/include/asm/swiotlb.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __ASM_SWIOTLB_H +#define __ASM_SWIOTLB_H + +#include + +extern struct dma_mapping_ops swiotlb_dma_ops; +extern struct dma_mapping_ops swiotlb_pci_dma_ops; + +int swiotlb_arch_address_needs_mapping(struct device *, dma_addr_t, + size_t size); + +static inline void dma_mark_clean(void *addr, size_t size) {} + +extern unsigned int ppc_swiotlb_enable; +int __init swiotlb_setup_bus_notifier(void); + +#endif /* __ASM_SWIOTLB_H */ -- cgit v1.2.3 From 106b506c3a8b74daa5751e83ed3e46438fcf9a52 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 11 Jun 2009 14:55:42 +1000 Subject: perf_counter: powerpc: Implement generalized cache events for POWER processors This adds tables of event codes for the generalized cache events for all the currently supported powerpc processors: POWER{4,5,5+,6,7} and PPC970*, plus powerpc-specific code to use these tables when a generalized cache event is requested. 
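For illustration only, a back-end's table has the shape sketched below; the event codes here are invented and do not belong to any real POWER processor:

#include <linux/perf_counter.h>

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Purely illustrative: by convention 0 means "no suitable raw event"
 * and -1 means "this combination is not meaningful".
 */
static int example_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	/*				  access	miss	*/
	[C(L1D)][C(OP_READ)]	= {	0x1234,		0x1235	},
	[C(L1D)][C(OP_WRITE)]	= {	0x2234,		-1	},
	[C(ITLB)][C(OP_READ)]	= {	0,		0x3234	},
};

A back-end then points the new cache_events member of its struct power_pmu at such a table, e.g. .cache_events = &example_cache_events.
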
Signed-off-by: Paul Mackerras Cc: Peter Zijlstra LKML-Reference: <18992.36430.933526.742969@drongo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 1c60f0ca792..cc7c887705b 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -33,6 +33,9 @@ struct power_pmu { u32 flags; int n_generic; int *generic_events; + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; }; extern struct power_pmu *ppmu; -- cgit v1.2.3 From 63b852a6b67d0820d388b0ecd0da83ccb4048b8d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:24 +0000 Subject: asm-generic: rename termios.h, signal.h and mman.h The existing asm-generic versions are incomplete and included by some architectures. New architectures should be able to use a generic version, so rename the existing files and change all users, which lets us add the new files. Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann --- arch/powerpc/include/asm/mman.h | 2 +- arch/powerpc/include/asm/signal.h | 2 +- arch/powerpc/include/asm/termios.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h index e7b99bac9f4..7b1c49811a2 100644 --- a/arch/powerpc/include/asm/mman.h +++ b/arch/powerpc/include/asm/mman.h @@ -1,7 +1,7 @@ #ifndef _ASM_POWERPC_MMAN_H #define _ASM_POWERPC_MMAN_H -#include +#include /* * This program is free software; you can redistribute it and/or diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index 69f709d8e8e..3eb13be11d8 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h @@ -94,7 +94,7 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#include +#include struct old_sigaction { __sighandler_t sa_handler; diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h index 2c14fea07c8..a24f48704a3 100644 --- a/arch/powerpc/include/asm/termios.h +++ b/arch/powerpc/include/asm/termios.h @@ -78,7 +78,7 @@ struct termio { #ifdef __KERNEL__ -#include +#include #endif /* __KERNEL__ */ -- cgit v1.2.3 From c31ae4bb4a9fa4606a74c0a4fb61b74f804e861e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:25 +0000 Subject: asm-generic: introduce asm/bitsperlong.h This provides a reliable way for asm-generic/types.h and other files to find out if it is running on a 32 or 64 bit platform. We cannot use CONFIG_64BIT for this in headers that are included from user space because CONFIG symbols are not available there. We also cannot do it inside of asm/types.h because some headers need the word size but cannot include types.h. The solution is to introduce a new header that defines both __BITS_PER_LONG for user space and BITS_PER_LONG for usage in the kernel. The asm-generic version falls back to 32 bit unless the architecture overrides it, which I did for all 64 bit platforms. 
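As a hypothetical example of the intended use (the example_reg_t name is invented), a header visible to user space can now key off __BITS_PER_LONG instead of the kernel-only CONFIG_64BIT:

#include <asm/bitsperlong.h>

/*
 * Illustrative sketch: pick a type by word size without relying on
 * CONFIG symbols, which user space never sees.
 */
#if __BITS_PER_LONG == 64
typedef unsigned long example_reg_t;
#else
typedef unsigned long long example_reg_t;
#endif
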
Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann --- arch/powerpc/include/asm/bitsperlong.h | 12 ++++++++++++ arch/powerpc/include/asm/types.h | 9 --------- 2 files changed, 12 insertions(+), 9 deletions(-) create mode 100644 arch/powerpc/include/asm/bitsperlong.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/bitsperlong.h b/arch/powerpc/include/asm/bitsperlong.h new file mode 100644 index 00000000000..5f1659032c4 --- /dev/null +++ b/arch/powerpc/include/asm/bitsperlong.h @@ -0,0 +1,12 @@ +#ifndef __ASM_POWERPC_BITSPERLONG_H +#define __ASM_POWERPC_BITSPERLONG_H + +#if defined(__powerpc64__) +# define __BITS_PER_LONG 64 +#else +# define __BITS_PER_LONG 32 +#endif + +#include + +#endif /* __ASM_POWERPC_BITSPERLONG_H */ diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index 7ce27a52bb3..a5aea0ca34e 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h @@ -40,15 +40,6 @@ typedef struct { #endif /* __ASSEMBLY__ */ #ifdef __KERNEL__ -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __powerpc64__ -#define BITS_PER_LONG 64 -#else -#define BITS_PER_LONG 32 -#endif - #ifndef __ASSEMBLY__ typedef __vector128 vector128; -- cgit v1.2.3 From 72099ed2719fc5829bd79c6ca9d1783ed026eb37 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:29 +0000 Subject: asm-generic: rename atomic.h to atomic-long.h The existing asm-generic/atomic.h only defines the atomic_long type. This renames it to atomic-long.h so we have a place to add a truly generic atomic.h that can be used on all non-SMP systems. Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann Acked-by: Ingo Molnar --- arch/powerpc/include/asm/atomic.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index b401950f525..b7d2d07b6f9 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -472,6 +472,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #endif /* __powerpc64__ */ -#include +#include #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_ATOMIC_H_ */ -- cgit v1.2.3 From 5b17e1cd8928ae65932758ce6478ac6d3e9a86b2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:30 +0000 Subject: asm-generic: rename page.h and uaccess.h The current asm-generic/page.h only contains the get_order function, and asm-generic/uaccess.h only implements unaligned accesses. This renames the file to getorder.h and uaccess-unaligned.h to make room for new page.h and uaccess.h file that will be usable by all simple (e.g. nommu) architectures. 
Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann --- arch/powerpc/include/asm/page_32.h | 2 +- arch/powerpc/include/asm/page_64.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index a0e3f6e6b4e..bd0849dbcaa 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -41,7 +41,7 @@ extern void clear_pages(void *page, int order); static inline void clear_page(void *page) { clear_pages(page, 0); } extern void copy_page(void *to, void *from); -#include +#include #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 043bfdfe4f7..5817a3b747e 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -180,6 +180,6 @@ do { \ (test_thread_flag(TIF_32BIT) ? \ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) -#include +#include #endif /* _ASM_POWERPC_PAGE_64_H */ -- cgit v1.2.3 From e14112d1bd5e193166b54be19119cf6440470560 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Fri, 12 Jun 2009 10:14:22 +1000 Subject: perfcounters: remove powerpc definitions of perf_counter_do_pending Commit 925d519ab82b6dd7aca9420d809ee83819c08db2 ("perf_counter: unify and fix delayed counter wakeup") added global definitions. Signed-off-by: Stephen Rothwell Acked-by: Paul Mackerras Acked-by: Benjamin Herrenschmidt Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/hw_irq.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 20a44d0c9fd..53512374e1c 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -156,8 +156,6 @@ static inline void clear_perf_counter_pending(void) "i" (offsetof(struct paca_struct, perf_counter_pending))); } -extern void perf_counter_do_pending(void); - #else static inline unsigned long test_perf_counter_pending(void) @@ -167,7 +165,6 @@ static inline unsigned long test_perf_counter_pending(void) static inline void set_perf_counter_pending(void) {} static inline void clear_perf_counter_pending(void) {} -static inline void perf_counter_do_pending(void) {} #endif /* CONFIG_PERF_COUNTERS */ #endif /* __KERNEL__ */ -- cgit v1.2.3 From 5c6fc8db768fb9990ee67ab052896fd46fbe2651 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jun 2009 04:38:45 +0000 Subject: powerpc/cell: Extract duplicated IOPTE_* to Both arch/powerpc/platforms/cell/iommu.c and arch/powerpc/platforms/ps3/mm.c contain the same Cell IOMMU page table entry definitions. Extract them and move them to , while adding a CBE_ prefix. This also allows them to be used by drivers. 
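As a rough sketch of how a driver might use the definitions (invented helper name and parameters, not code from this patch), an IOPTE for a coherent read/write mapping can be assembled from the bit fields:

#include <linux/types.h>
#include <asm/iommu.h>

/*
 * Illustrative only: build a read/write, coherent Cell IOPTE from a
 * physical address and an I/O identifier.
 */
static inline u64 example_cell_iopte(unsigned long pa, unsigned long ioid)
{
	return CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
	       (pa & CBE_IOPTE_RPN_Mask) |
	       (ioid & CBE_IOPTE_IOID_Mask);
}
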
Signed-off-by: Geert Uytterhoeven Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/iommu.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 7464c0daddd..7ead7c16fb7 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -35,6 +35,16 @@ #define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1)) #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE) +/* Cell page table entries */ +#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */ +#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */ +#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */ +#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */ +#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */ +#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */ +#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */ +#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */ + /* Boot time flags */ extern int iommu_is_off; extern int iommu_force_on; -- cgit v1.2.3 From d3352c9f1e8e2f2989d9686c8aa8acb4842fe75e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jun 2009 04:38:48 +0000 Subject: ps3fb/vram: Extract common GPU stuff into Signed-off-by: Geert Uytterhoeven Cc: linux-fbdev-devel@lists.sourceforge.net Cc: Jim Paris Cc: Jens Axboe Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ps3.h | 3 -- arch/powerpc/include/asm/ps3gpu.h | 78 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 3 deletions(-) create mode 100644 arch/powerpc/include/asm/ps3gpu.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index cdb6fd814de..b9e4987986d 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -520,7 +520,4 @@ void ps3_sync_irq(int node); u32 ps3_get_hw_thread_id(int cpu); u64 ps3_get_spe_id(void *arg); -/* mutex synchronizing GPU accesses and video mode changes */ -extern struct mutex ps3_gpu_mutex; - #endif diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h new file mode 100644 index 00000000000..1037efae4a5 --- /dev/null +++ b/arch/powerpc/include/asm/ps3gpu.h @@ -0,0 +1,78 @@ +/* + * PS3 GPU declarations. + * + * Copyright 2009 Sony Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * If not, see . 
+ */ + +#ifndef _ASM_POWERPC_PS3GPU_H +#define _ASM_POWERPC_PS3GPU_H + +#include + +#include + + +#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x101 +#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x102 + +#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600 +#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 +#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602 + +#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32) + +#define L1GPU_DISPLAY_SYNC_HSYNC 1 +#define L1GPU_DISPLAY_SYNC_VSYNC 2 + + +/* mutex synchronizing GPU accesses and video mode changes */ +extern struct mutex ps3_gpu_mutex; + + +static inline int lv1_gpu_display_sync(u64 context_handle, u64 head, + u64 ddr_offset) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC, + head, ddr_offset, 0, 0); +} + +static inline int lv1_gpu_display_flip(u64 context_handle, u64 head, + u64 ddr_offset) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP, + head, ddr_offset, 0, 0); +} + +static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar, + u64 xdr_size, u64 ioif_offset) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP, + xdr_lpar, xdr_size, ioif_offset, 0); +} + +static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset, + u64 ioif_offset, u64 sync_width, u64 pitch) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, + ddr_offset, ioif_offset, sync_width, + pitch); +} + +#endif /* _ASM_POWERPC_PS3GPU_H */ -- cgit v1.2.3 From c204ff65590837e6a9c50ca549497b4682682ec6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jun 2009 04:38:49 +0000 Subject: ps3fb: Tear down FB setup during cleanup During cleanup, use L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE to tear down the setup done by L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP. This allows unloading and reloading of ps3fb while the sound driver keeps the GPU open. 
Signed-off-by: Geert Uytterhoeven Cc: linux-fbdev-devel@lists.sourceforge.net Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ps3gpu.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h index 1037efae4a5..b2b89591907 100644 --- a/arch/powerpc/include/asm/ps3gpu.h +++ b/arch/powerpc/include/asm/ps3gpu.h @@ -31,6 +31,7 @@ #define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600 #define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 #define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602 +#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE 0x603 #define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32) @@ -75,4 +76,11 @@ static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset, pitch); } +static inline int lv1_gpu_fb_close(u64 context_handle) +{ + return lv1_gpu_context_attribute(context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0, + 0, 0, 0); +} + #endif /* _ASM_POWERPC_PS3GPU_H */ -- cgit v1.2.3 From 3240776ce290a3be4ca77bacf8b1e8d36b4a691d Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 10 Jun 2009 22:22:08 +0000 Subject: powerpc: Wire up sys_rt_tgsigqueueinfo Signed-off-by: Stephen Rothwell Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/systbl.h | 1 + arch/powerpc/include/asm/unistd.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index a0b92de51c7..370600ca276 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -325,3 +325,4 @@ SYSCALL(inotify_init1) SYSCALL_SPU(perf_counter_open) COMPAT_SYS_SPU(preadv) COMPAT_SYS_SPU(pwritev) +COMPAT_SYS(rt_tgsigqueueinfo) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 4badac2d11d..cef080bfc60 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -344,10 +344,11 @@ #define __NR_perf_counter_open 319 #define __NR_preadv 320 #define __NR_pwritev 321 +#define __NR_rt_tgsigqueueinfo 322 #ifdef __KERNEL__ -#define __NR_syscalls 322 +#define __NR_syscalls 323 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls -- cgit v1.2.3 From 4c75f84f2c781beb230031234ed961d28771a764 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 12 Jun 2009 02:00:50 +0000 Subject: powerpc: Add compiler memory barrier to mtmsr macro On 32-bit non-Book E, local_irq_restore() turns into just mtmsr(), which doesn't currently have a compiler memory barrier. This means that accesses to memory inside a local_irq_save/restore section, or a spin_lock_irqsave/spin_unlock_irqrestore section on UP, can be reordered by the compiler to occur outside that section. To fix this, this adds a compiler memory barrier to mtmsr for both 32-bit and 64-bit. Having a compiler memory barrier in mtmsr makes sense because it will almost always be changing something about the context in which memory accesses are done, so in general we don't want memory accesses getting moved from one side of an mtmsr to the other. With the barrier in mtmsr(), some of the explicit barriers in hw_irq.h are now redundant, so this removes them. 
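The kind of reordering being prevented can be illustrated with a small sketch (editorial example, not from the patch; protected_count is an invented variable):

#include <linux/irqflags.h>

static unsigned long protected_count;

/*
 * On 32-bit, local_irq_save()/local_irq_restore() boil down to
 * mfmsr()/mtmsr().  Without a "memory" clobber on mtmsr, the compiler
 * could legally move the update of protected_count outside the
 * critical section, since the asm statement never admitted to
 * touching memory.
 */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);
	protected_count++;	/* must stay between the two MSR writes */
	local_irq_restore(flags);
}
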
Signed-off-by: Paul Mackerras Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/hw_irq.h | 5 ++--- arch/powerpc/include/asm/reg.h | 4 ++-- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 53512374e1c..b7f8f4a87cc 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -80,7 +80,7 @@ static inline void local_irq_disable(void) __asm__ __volatile__("wrteei 0": : :"memory"); #else unsigned long msr; - __asm__ __volatile__("": : :"memory"); + msr = mfmsr(); SET_MSR_EE(msr & ~MSR_EE); #endif @@ -92,7 +92,7 @@ static inline void local_irq_enable(void) __asm__ __volatile__("wrteei 1": : :"memory"); #else unsigned long msr; - __asm__ __volatile__("": : :"memory"); + msr = mfmsr(); SET_MSR_EE(msr | MSR_EE); #endif @@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags) #else SET_MSR_EE(msr & ~MSR_EE); #endif - __asm__ __volatile__("": : :"memory"); } #define local_save_flags(flags) ((flags) = mfmsr()) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index fb359b0a693..a3c28e46947 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -745,11 +745,11 @@ asm volatile("mfmsr %0" : "=r" (rval)); rval;}) #ifdef CONFIG_PPC64 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \ - : : "r" (v)) + : : "r" (v) : "memory") #define mtmsrd(v) __mtmsrd((v), 0) #define mtmsr(v) mtmsrd(v) #else -#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) +#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory") #endif #define mfspr(rn) ({unsigned long rval; \ -- cgit v1.2.3 From c2e95c6d7a9b9d8f023c3639edbb1da65ccd15ac Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Fri, 12 Jun 2009 21:10:41 +0000 Subject: powerpc: Use generic atomic64_t implementation on 32-bit processors This makes 32-bit powerpc use the generic atomic64_t implementation. 
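Callers are unaffected; as a brief usage sketch (invented names), the same code now works on both 32-bit and 64-bit, with the 32-bit operations supplied by the generic spinlock-based library:

#include <asm/atomic.h>

/* Example usage only: identical source on 32- and 64-bit powerpc. */
static atomic64_t example_counter = ATOMIC64_INIT(0);

static inline long long example_next_id(void)
{
	return atomic64_add_return(1, &example_counter);
}
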
Signed-off-by: Paul Mackerras Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/atomic.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index b7d2d07b6f9..4012483b189 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -470,6 +470,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#else /* __powerpc64__ */ +#include + #endif /* __powerpc64__ */ #include -- cgit v1.2.3 From 9f08e9db84c1e9234e07b9b595f5b2508c621823 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jun 2009 04:38:53 +0000 Subject: ps3: Use dev_[gs]et_drvdata() instead of direct access for system bus devices Signed-off-by: Geert Uytterhoeven Cc: Geoff Levand Acked-by: Geoff Levand Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ps3.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index b9e4987986d..dcd302f489f 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -421,12 +421,12 @@ static inline struct ps3_system_bus_driver * static inline void ps3_system_bus_set_driver_data( struct ps3_system_bus_device *dev, void *data) { - dev->core.driver_data = data; + dev_set_drvdata(&dev->core, data); } static inline void *ps3_system_bus_get_driver_data( struct ps3_system_bus_device *dev) { - return dev->core.driver_data; + return dev_get_drvdata(&dev->core); } /* These two need global scope for get_dma_ops(). */ -- cgit v1.2.3 From 03fa68c245cccbcb99035cbabaa13b408ba91ab5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jun 2009 04:38:54 +0000 Subject: ps3: shorten ps3_system_bus_[gs]et_driver_data to ps3_system_bus_[gs]et_drvdata Signed-off-by: Geert Uytterhoeven Cc: Geoff Levand Cc: Jim Paris Acked-by: Geoff Levand Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ps3.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index dcd302f489f..7660694ab3c 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -418,12 +418,12 @@ static inline struct ps3_system_bus_driver * * @data: Data to set */ -static inline void ps3_system_bus_set_driver_data( +static inline void ps3_system_bus_set_drvdata( struct ps3_system_bus_device *dev, void *data) { dev_set_drvdata(&dev->core, data); } -static inline void *ps3_system_bus_get_driver_data( +static inline void *ps3_system_bus_get_drvdata( struct ps3_system_bus_device *dev) { return dev_get_drvdata(&dev->core); -- cgit v1.2.3 From a4e623fbc9b201930abcf78df6db5e49aa8e00cb Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 10 Jun 2009 04:39:06 +0000 Subject: ps3: Replace direct file operations by callback Currently the FLASH database is updated by the kernel using file operations, meant for userspace only. While this works for us because copy_{from,to}_user() on powerpc can handle kernel pointers, this is unportable and a bad example. Replace the file operations by callbacks, registered by the ps3flash driver. 
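A registration could look roughly like the following sketch (the example_ names are invented and the bodies elided; this is not the actual ps3flash code):

#include <linux/init.h>
#include <linux/types.h>
#include <asm/ps3.h>

static ssize_t example_flash_read(void *buf, size_t count, loff_t pos)
{
	/* ... read 'count' bytes at offset 'pos' from FLASH into 'buf' ... */
	return count;
}

static ssize_t example_flash_write(const void *buf, size_t count, loff_t pos)
{
	/* ... write 'count' bytes from 'buf' to FLASH at offset 'pos' ... */
	return count;
}

static const struct ps3_os_area_flash_ops example_flash_ops = {
	.read	= example_flash_read,
	.write	= example_flash_write,
};

static int __init example_flash_init(void)
{
	ps3_os_area_flash_register(&example_flash_ops);
	return 0;
}
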
Signed-off-by: Geert Uytterhoeven Cc: Geoff Levand Acked-by: Geoff Levand Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/ps3.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index 7660694ab3c..7f065e178ec 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -53,6 +53,13 @@ enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void); extern u64 ps3_os_area_get_rtc_diff(void); extern void ps3_os_area_set_rtc_diff(u64 rtc_diff); +struct ps3_os_area_flash_ops { + ssize_t (*read)(void *buf, size_t count, loff_t pos); + ssize_t (*write)(const void *buf, size_t count, loff_t pos); +}; + +extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops); + /* dma routines */ enum ps3_dma_page_size { -- cgit v1.2.3 From 9974458e2f9a11dbd2f4bd14fab5a79af4907b41 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 15 Jun 2009 21:45:16 +1000 Subject: perf_counter: Make set_perf_counter_pending() declaration common At present, every architecture that supports perf_counters has to declare set_perf_counter_pending() in its arch-specific headers. This consolidates the declarations into a single declaration in one common place, include/linux/perf_counter.h. On powerpc, we continue to provide a static inline definition of set_perf_counter_pending() in the powerpc hw_irq.h. Also, this removes from the x86 perf_counter.h the unused null definitions of {test,clear}_perf_counter_pending. Reported-by: Mike Frysinger Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: benh@kernel.crashing.org LKML-Reference: <18998.13388.920691.523227@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/hw_irq.h | 1 - arch/powerpc/include/asm/perf_counter.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 53512374e1c..1974cf191b0 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -163,7 +163,6 @@ static inline unsigned long test_perf_counter_pending(void) return 0; } -static inline void set_perf_counter_pending(void) {} static inline void clear_perf_counter_pending(void) {} #endif /* CONFIG_PERF_COUNTERS */ diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index cc7c887705b..b398a84edce 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -10,6 +10,8 @@ */ #include +#include + #define MAX_HWCOUNTERS 8 #define MAX_EVENT_ALTERNATIVES 8 #define MAX_LIMITED_HWCOUNTERS 2 -- cgit v1.2.3 From cab888e678d0986ebce95464d3842a6aeca1e3d8 Mon Sep 17 00:00:00 2001 From: Nate Case Date: Wed, 10 Jun 2009 15:37:28 -0500 Subject: powerpc/fsl-booke: Enable L1 cache on e500v1/e500v2/e500mc CPUs Some boot loaders may not enable L1 instruction/data cache. Check if data and instruction caches are enabled, and enable them if needed. 
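The check amounts to testing the enable bits in L1CSR0/L1CSR1, as in this simplified sketch (the actual work is done in early assembly, which also performs the required flash-invalidate and msync/isync sequence before enabling; helper names are invented):

#include <asm/reg.h>

/* Illustrative only: report whether the L1 caches are currently on. */
static inline int example_l1_dcache_enabled(void)
{
	return (mfspr(SPRN_L1CSR0) & L1CSR0_DCE) != 0;
}

static inline int example_l1_icache_enabled(void)
{
	return (mfspr(SPRN_L1CSR1) & L1CSR1_ICE) != 0;
}
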
Signed-off-by: Nate Case Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/reg_booke.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 601ddbc4600..6bcf364cbb2 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -389,12 +389,14 @@ #define ICCR_CACHE 1 /* Cacheable */ /* Bit definitions for L1CSR0. */ +#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ #define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */ #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ #define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */ #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ /* Bit definitions for L1CSR1. */ +#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */ #define L1CSR1_ICLFR 0x00000100 /* Instr Cache Lock Bits Flash Reset */ #define L1CSR1_ICFI 0x00000002 /* Instr Cache Flash Invalidate */ #define L1CSR1_ICE 0x00000001 /* Instr Cache Enable */ -- cgit v1.2.3 From e86b4998f00b51f60b4baab9bbef5e07c9407614 Mon Sep 17 00:00:00 2001 From: "mware@internode.on.net" Date: Wed, 10 Jun 2009 17:01:19 +0000 Subject: powerpc/fsl: Increase the number of possible localbus banks Currently the fsl,*lbc devices support 8 banks (ie OR and BR registers). This is adequate for most pq2 and pq3 processors, but not the MPC8280 which has 12 banks. Signed-Off-By: Mark Ware Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/fsl_lbc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 63a4f779f53..1b5a21041f9 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h @@ -95,8 +95,8 @@ struct fsl_lbc_bank { }; struct fsl_lbc_regs { - struct fsl_lbc_bank bank[8]; - u8 res0[0x28]; + struct fsl_lbc_bank bank[12]; + u8 res0[0x8]; __be32 mar; /**< UPM Address Register */ u8 res1[0x4]; __be32 mamr; /**< UPMA Mode Register */ -- cgit v1.2.3 From 9317726de42a157c377f7fe9110a63260800582f Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Tue, 26 May 2009 05:21:41 +0000 Subject: powerpc: Introduce macro spin_event_timeout() The macro spin_event_timeout() takes a condition and timeout value (in microseconds) as parameters. It spins until either the condition is true or the timeout expires. It returns the result of the condition when the loop was terminated. This primary purpose of this macro is to poll on a hardware register until a status bit changes. The timeout ensures that the loop still terminates if the bit doesn't change as expected. This macro makes it easier for driver developers to perform this kind of operation properly. Signed-off-by: Timur Tabi Acked-by: Geoff Thorpe Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/delay.h | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h index f9200a65c63..1e2eb41fa05 100644 --- a/arch/powerpc/include/asm/delay.h +++ b/arch/powerpc/include/asm/delay.h @@ -2,8 +2,11 @@ #define _ASM_POWERPC_DELAY_H #ifdef __KERNEL__ +#include + /* * Copyright 1996, Paul Mackerras. + * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -30,5 +33,38 @@ extern void udelay(unsigned long usecs); #define mdelay(n) udelay((n) * 1000) #endif +/** + * spin_event_timeout - spin until a condition gets true or a timeout elapses + * @condition: a C expression to evalate + * @timeout: timeout, in microseconds + * @delay: the number of microseconds to delay between each evaluation of + * @condition + * + * The process spins until the condition evaluates to true (non-zero) or the + * timeout elapses. The return value of this macro is the value of + * @condition when the loop terminates. This allows you to determine the cause + * of the loop terminates. If the return value is zero, then you know a + * timeout has occurred. + * + * This primary purpose of this macro is to poll on a hardware register + * until a status bit changes. The timeout ensures that the loop still + * terminates even if the bit never changes. The delay is for devices that + * need a delay in between successive reads. + * + * gcc will optimize out the if-statement if @delay is a constant. + */ +#define spin_event_timeout(condition, timeout, delay) \ +({ \ + typeof(condition) __ret; \ + unsigned long __loops = tb_ticks_per_usec * timeout; \ + unsigned long __start = get_tbl(); \ + while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \ + if (delay) \ + udelay(delay); \ + else \ + cpu_relax(); \ + __ret; \ +}) + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_DELAY_H */ -- cgit v1.2.3 From 2fae0a524b193e200b71778407ad29b22417056a Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 14 Jun 2009 16:16:10 +0000 Subject: powerpc: Add memory clobber to mtspr() Without this clobber, mtspr can be re-ordered by gcc vs. surrounding memory accesses. While this might be ok for some cases, it's not in others and I'm not confident that all callers get it right (In fact I'm sure some of them don't). So for now, let's make mtspr() itself contain a memory clobber until we can audit and fix everything, at which point we can remove it if we think it's worth doing so. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/reg.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a3c28e46947..1170267736d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -755,7 +755,8 @@ #define mfspr(rn) ({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) -#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)) +#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ + : "memory") #ifdef __powerpc64__ #ifdef CONFIG_PPC_CELL -- cgit v1.2.3 From 08604bd9935dc98fb62ef61d5b7baa7ccc10f8c2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 Jun 2009 15:31:12 -0700 Subject: time: move PIT_TICK_RATE to linux/timex.h PIT_TICK_RATE is currently defined in four architectures, but in three different places. While linux/timex.h is not the perfect place for it, it is still a reasonable replacement for those drivers that traditionally use asm/timex.h to get CLOCK_TICK_RATE and expect it to be the PIT frequency. Note that for Alpha, the actual value changed from 1193182UL to 1193180UL. This is unlikely to make a difference, and probably can only improve accuracy. 
There was a discussion on the correct value of CLOCK_TICK_RATE a few years ago, after which every existing instance was getting changed to 1193182. According to the specification, it should be 1193181.818181... Signed-off-by: Arnd Bergmann Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Len Brown Cc: john stultz Cc: Dmitry Torokhov Cc: Takashi Iwai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/8253pit.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h index b70d6e53b30..a71c9c1455a 100644 --- a/arch/powerpc/include/asm/8253pit.h +++ b/arch/powerpc/include/asm/8253pit.h @@ -1,10 +1,3 @@ -#ifndef _ASM_POWERPC_8253PIT_H -#define _ASM_POWERPC_8253PIT_H - /* * 8253/8254 Programmable Interval Timer */ - -#define PIT_TICK_RATE 1193182UL - -#endif /* _ASM_POWERPC_8253PIT_H */ -- cgit v1.2.3 From 87c441e54dfcf9f45593ecaf68e7e18ea53d5e13 Mon Sep 17 00:00:00 2001 From: Wolfgang Denk Date: Wed, 17 Jun 2009 00:30:22 -0600 Subject: powerpc/5xxx: Add common mpc5xxx_get_bus_frequency() function So far, MPC512x used mpc512x_find_ips_freq() to get the bus frequency, while MPC52xx used mpc52xx_find_ipb_freq(). Despite the different clock names (IPS vs. IPB) the code was identical. Use common code for both processor families. Signed-off-by: Wolfgang Denk Signed-off-by: Grant Likely --- arch/powerpc/include/asm/mpc512x.h | 22 ---------------------- arch/powerpc/include/asm/mpc52xx.h | 2 +- arch/powerpc/include/asm/mpc5xxx.h | 22 ++++++++++++++++++++++ 3 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 arch/powerpc/include/asm/mpc512x.h create mode 100644 arch/powerpc/include/asm/mpc5xxx.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/mpc512x.h b/arch/powerpc/include/asm/mpc512x.h deleted file mode 100644 index c48a1658eea..00000000000 --- a/arch/powerpc/include/asm/mpc512x.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. - * - * Author: John Rigby, , Friday Apr 13 2007 - * - * Description: - * MPC5121 Prototypes and definitions - * - * This is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - */ - -#ifndef __ASM_POWERPC_MPC512x_H__ -#define __ASM_POWERPC_MPC512x_H__ - -extern unsigned long mpc512x_find_ips_freq(struct device_node *node); - -#endif /* __ASM_POWERPC_MPC512x_H__ */ - diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h index 52e049cd9e6..1b4f697abbd 100644 --- a/arch/powerpc/include/asm/mpc52xx.h +++ b/arch/powerpc/include/asm/mpc52xx.h @@ -16,6 +16,7 @@ #ifndef __ASSEMBLY__ #include #include +#include #endif /* __ASSEMBLY__ */ #include @@ -268,7 +269,6 @@ struct mpc52xx_intr { #ifndef __ASSEMBLY__ /* mpc52xx_common.c */ -extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node); extern void mpc5200_setup_xlb_arbiter(void); extern void mpc52xx_declare_of_platform_devices(void); extern void mpc52xx_map_common_devices(void); diff --git a/arch/powerpc/include/asm/mpc5xxx.h b/arch/powerpc/include/asm/mpc5xxx.h new file mode 100644 index 00000000000..5ce9c5fa434 --- /dev/null +++ b/arch/powerpc/include/asm/mpc5xxx.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * Author: John Rigby, , Friday Apr 13 2007 + * + * Description: + * MPC5xxx Prototypes and definitions + * + * This is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#ifndef __ASM_POWERPC_MPC5xxx_H__ +#define __ASM_POWERPC_MPC5xxx_H__ + +extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node); + +#endif /* __ASM_POWERPC_MPC5xxx_H__ */ + -- cgit v1.2.3 From a6c140969b4685f9b9f6773c0760f55ca66d1825 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 17 Jun 2009 16:33:34 -0400 Subject: Delete pcibios_select_root This function was only used by pci_claim_resource(), and the last commit deleted that use. Signed-off-by: Matthew Wilcox Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pci.h | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index ba17d5d90a4..d9483c504d2 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -195,19 +195,6 @@ extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, struct pci_bus_region *region); -static inline struct resource *pcibios_select_root(struct pci_dev *pdev, - struct resource *res) -{ - struct resource *root = NULL; - - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; - if (res->flags & IORESOURCE_MEM) - root = &iomem_resource; - - return root; -} - extern void pcibios_claim_one_bus(struct pci_bus *b); extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); -- cgit v1.2.3 From 105988c015943e77092a6568bc5fb7e386df6ccd Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 17 Jun 2009 21:50:04 +1000 Subject: perf_counter: powerpc: Enable use of software counters on 32-bit powerpc This enables the perf_counter subsystem on 32-bit powerpc. Since we don't have any support for hardware counters on 32-bit powerpc yet, only software counters can be used. Besides selecting HAVE_PERF_COUNTERS for 32-bit powerpc as well as 64-bit, the main thing this does is add an implementation of set_perf_counter_pending(). This needs to arrange for perf_counter_do_pending() to be called when interrupts are enabled. 
Rather than add code to local_irq_restore as 64-bit does, the 32-bit set_perf_counter_pending() generates an interrupt by setting the decrementer to 1 so that a decrementer interrupt will become pending in 1 or 2 timebase ticks (if a decrementer interrupt isn't already pending). When interrupts are enabled, timer_interrupt() will be called, and some new code in there calls perf_counter_do_pending(). We use a per-cpu array of flags to indicate whether we need to call perf_counter_do_pending() or not. This introduces a couple of new Kconfig symbols: PPC_HAVE_PMU_SUPPORT, which is selected by processor families for which we have hardware PMU support (currently only PPC64), and PPC_PERF_CTRS, which enables the powerpc-specific perf_counter back-end. Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: linuxppc-dev@ozlabs.org Cc: benh@kernel.crashing.org LKML-Reference: <19000.55404.103840.393470@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/hw_irq.h | 5 ++++- arch/powerpc/include/asm/perf_counter.h | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 10a642df014..867ab8ed69b 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -131,6 +131,8 @@ static inline int irqs_disabled_flags(unsigned long flags) struct irq_chip; #ifdef CONFIG_PERF_COUNTERS + +#ifdef CONFIG_PPC64 static inline unsigned long test_perf_counter_pending(void) { unsigned long x; @@ -154,8 +156,9 @@ static inline void clear_perf_counter_pending(void) "r" (0), "i" (offsetof(struct paca_struct, perf_counter_pending))); } +#endif /* CONFIG_PPC64 */ -#else +#else /* CONFIG_PERF_COUNTERS */ static inline unsigned long test_perf_counter_pending(void) { diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index b398a84edce..2c2d9f643df 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -57,10 +57,16 @@ extern struct power_pmu *ppmu; struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); -#define perf_misc_flags(regs) perf_misc_flags(regs) - extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +/* + * Only override the default definitions in include/linux/perf_counter.h + * if we have hardware PMU support. + */ +#ifdef CONFIG_PPC_PERF_CTRS +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif + /* * The power_pmu.get_constraint function returns a 64-bit value and * a 64-bit mask that express the constraints between this event and -- cgit v1.2.3 From 448d64f8f4c147db466c549550767cc515a4d34c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 17 Jun 2009 21:51:13 +1000 Subject: perf_counter: powerpc: Use unsigned long for register and constraint values This changes the powerpc perf_counter back-end to use unsigned long types for hardware register values and for the value/mask pairs used in checking whether a given set of events fit within the hardware constraints. This is in preparation for adding support for the PMU on some 32-bit powerpc processors. On 32-bit processors the hardware registers are only 32 bits wide, and the PMU structure is generally simpler, so 32 bits should be ample for expressing the hardware constraints. On 64-bit processors, unsigned long is 64 bits wide, so using unsigned long vs. u64 (unsigned long long) makes no actual difference. 
This makes some other very minor changes: adjusting whitespace to line things up in initialized structures, and simplifying some code in hw_perf_disable(). Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: linuxppc-dev@ozlabs.org Cc: benh@kernel.crashing.org LKML-Reference: <19000.55473.26174.331511@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 35 +++++++++++++++++---------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 2c2d9f643df..2ceb0fefa93 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -21,21 +21,22 @@ * describe the PMU on a particular POWER-family CPU. */ struct power_pmu { - int n_counter; - int max_alternatives; - u64 add_fields; - u64 test_adder; - int (*compute_mmcr)(u64 events[], int n_ev, - unsigned int hwc[], u64 mmcr[]); - int (*get_constraint)(u64 event, u64 *mskp, u64 *valp); - int (*get_alternatives)(u64 event, unsigned int flags, - u64 alt[]); - void (*disable_pmc)(unsigned int pmc, u64 mmcr[]); - int (*limited_pmc_event)(u64 event); - u32 flags; - int n_generic; - int *generic_events; - int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + int n_counter; + int max_alternatives; + unsigned long add_fields; + unsigned long test_adder; + int (*compute_mmcr)(u64 events[], int n_ev, + unsigned int hwc[], unsigned long mmcr[]); + int (*get_constraint)(u64 event, unsigned long *mskp, + unsigned long *valp); + int (*get_alternatives)(u64 event, unsigned int flags, + u64 alt[]); + void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); + int (*limited_pmc_event)(u64 event); + u32 flags; + int n_generic; + int *generic_events; + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX]; }; @@ -68,8 +69,8 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs); #endif /* - * The power_pmu.get_constraint function returns a 64-bit value and - * a 64-bit mask that express the constraints between this event and + * The power_pmu.get_constraint function returns a 32/64-bit value and + * a 32/64-bit mask that express the constraints between this event and * other events. * * The value and mask are divided up into (non-overlapping) bitfields -- cgit v1.2.3 From 079b3c569c87819e7a19d9b9f51d4746fc47bf9a Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 17 Jun 2009 21:52:09 +1000 Subject: perf_counter: powerpc: Change how processor-specific back-ends get selected At present, the powerpc generic (processor-independent) perf_counter code has list of processor back-end modules, and at initialization, it looks at the PVR (processor version register) and has a switch statement to select a suitable processor-specific back-end. This is going to become inconvenient as we add more processor-specific back-ends, so this inverts the order: now each back-end checks whether it applies to the current processor, and registers itself if so. Furthermore, instead of looking at the PVR, back-ends now check the cur_cpu_spec->oprofile_cpu_type string and match on that. Lastly, each back-end now specifies a name for itself so the core can print a nice message when a back-end registers itself. This doesn't provide any support for unregistering back-ends, but that wouldn't be hard to do and would allow back-ends to be modules. 
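A back-end's initialization now follows roughly this pattern (hypothetical sketch; the "example" name and cpu-type string are invented):

#include <linux/init.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/cputable.h>
#include <asm/perf_counter.h>

static struct power_pmu example_pmu = {
	.name		= "EXAMPLE",
	.n_counter	= 6,
	/* .compute_mmcr, .get_constraint, ... omitted in this sketch */
};

static int __init init_example_pmu(void)
{
	/* only register if we are running on the matching processor */
	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/example"))
		return -ENODEV;

	return register_power_pmu(&example_pmu);
}

arch_initcall(init_example_pmu);
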
Signed-off-by: Paul Mackerras Cc: Peter Zijlstra Cc: linuxppc-dev@ozlabs.org Cc: benh@kernel.crashing.org LKML-Reference: <19000.55529.762227.518531@cargo.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- arch/powerpc/include/asm/perf_counter.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h index 2ceb0fefa93..8ccd4e15576 100644 --- a/arch/powerpc/include/asm/perf_counter.h +++ b/arch/powerpc/include/asm/perf_counter.h @@ -21,6 +21,7 @@ * describe the PMU on a particular POWER-family CPU. */ struct power_pmu { + const char *name; int n_counter; int max_alternatives; unsigned long add_fields; @@ -41,8 +42,6 @@ struct power_pmu { [PERF_COUNT_HW_CACHE_RESULT_MAX]; }; -extern struct power_pmu *ppmu; - /* * Values for power_pmu.flags */ @@ -56,6 +55,8 @@ extern struct power_pmu *ppmu; #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ +extern int register_power_pmu(struct power_pmu *); + struct pt_regs; extern unsigned long perf_misc_flags(struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct pt_regs *regs); -- cgit v1.2.3 From bbea0b6e0d214ef1511b9c6ccf3af26b38f0af7d Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Tue, 8 Sep 2009 17:53:04 -0700 Subject: fsldma: Add DMA_SLAVE support Use the DMA_SLAVE capability of the DMAEngine API to copy/from a scatterlist into an arbitrary list of hardware address/length pairs. This allows a single DMA transaction to copy data from several different devices into a scatterlist at the same time. This also adds support to enable some controller-specific features such as external start and external pause for a DMA transaction. [dan.j.williams@intel.com: rebased on tx_list movement] Signed-off-by: Ira W. Snyder Acked-by: Li Yang Acked-by: Kumar Gala Signed-off-by: Dan Williams --- arch/powerpc/include/asm/fsldma.h | 136 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 arch/powerpc/include/asm/fsldma.h (limited to 'arch/powerpc/include/asm') diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h new file mode 100644 index 00000000000..a67aeed17d4 --- /dev/null +++ b/arch/powerpc/include/asm/fsldma.h @@ -0,0 +1,136 @@ +/* + * Freescale MPC83XX / MPC85XX DMA Controller + * + * Copyright (c) 2009 Ira W. Snyder + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ +#define __ARCH_POWERPC_ASM_FSLDMA_H__ + +#include + +/* + * Definitions for the Freescale DMA controller's DMA_SLAVE implemention + * + * The Freescale DMA_SLAVE implementation was designed to handle many-to-many + * transfers. An example usage would be an accelerated copy between two + * scatterlists. Another example use would be an accelerated copy from + * multiple non-contiguous device buffers into a single scatterlist. + * + * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This + * structure contains a list of hardware addresses that should be copied + * to/from the scatterlist passed into device_prep_slave_sg(). The structure + * also has some fields to enable hardware-specific features. 
+ */ + +/** + * struct fsl_dma_hw_addr + * @entry: linked list entry + * @address: the hardware address + * @length: length to transfer + * + * Holds a single physical hardware address / length pair for use + * with the DMAEngine DMA_SLAVE API. + */ +struct fsl_dma_hw_addr { + struct list_head entry; + + dma_addr_t address; + size_t length; +}; + +/** + * struct fsl_dma_slave + * @addresses: a linked list of struct fsl_dma_hw_addr structures + * @request_count: value for DMA request count + * @src_loop_size: setup and enable constant source-address DMA transfers + * @dst_loop_size: setup and enable constant destination address DMA transfers + * @external_start: enable externally started DMA transfers + * @external_pause: enable externally paused DMA transfers + * + * Holds a list of address / length pairs for use with the DMAEngine + * DMA_SLAVE API implementation for the Freescale DMA controller. + */ +struct fsl_dma_slave { + + /* List of hardware address/length pairs */ + struct list_head addresses; + + /* Support for extra controller features */ + unsigned int request_count; + unsigned int src_loop_size; + unsigned int dst_loop_size; + bool external_start; + bool external_pause; +}; + +/** + * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave + * @slave: the &struct fsl_dma_slave to add to + * @address: the hardware address to add + * @length: the length of bytes to transfer from @address + * + * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on + * success, -ERRNO otherwise. + */ +static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave, + dma_addr_t address, size_t length) +{ + struct fsl_dma_hw_addr *addr; + + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); + if (!addr) + return -ENOMEM; + + INIT_LIST_HEAD(&addr->entry); + addr->address = address; + addr->length = length; + + list_add_tail(&addr->entry, &slave->addresses); + return 0; +} + +/** + * fsl_dma_slave_free - free a struct fsl_dma_slave + * @slave: the struct fsl_dma_slave to free + * + * Free a struct fsl_dma_slave and all associated address/length pairs + */ +static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave) +{ + struct fsl_dma_hw_addr *addr, *tmp; + + if (slave) { + list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) { + list_del(&addr->entry); + kfree(addr); + } + + kfree(slave); + } +} + +/** + * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave + * @gfp: the flags to pass to kmalloc when allocating this structure + * + * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new + * struct fsl_dma_slave on success, or NULL on failure. + */ +static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp) +{ + struct fsl_dma_slave *slave; + + slave = kzalloc(sizeof(*slave), gfp); + if (!slave) + return NULL; + + INIT_LIST_HEAD(&slave->addresses); + return slave; +} + +#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */ -- cgit v1.2.3
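As a usage sketch for the helpers introduced above (editorial example; dev_addr and len are placeholder parameters), a caller builds up a struct fsl_dma_slave like this:

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/fsldma.h>

static struct fsl_dma_slave *example_build_slave(dma_addr_t dev_addr,
						 size_t len)
{
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	/* one hardware address/length pair; more may be appended */
	if (fsl_dma_slave_append(slave, dev_addr, len)) {
		fsl_dma_slave_free(slave);
		return NULL;
	}

	/* optional controller-specific features */
	slave->request_count = 1;
	slave->external_start = false;

	return slave;
}

How the finished structure is handed to the fsldma driver before preparing a DMA_SLAVE transaction is outside this header and not shown here.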